; RUN: opt < %s -passes=instcombine -disable-output
-@X = global i32 5 ; <i32*> [#uses=1]
+@X = global i32 5 ; <ptr> [#uses=1]
define i64 @test() {
%C = add i64 1, 2 ; <i64> [#uses=1]
- %V = add i64 ptrtoint (i32* @X to i64), %C ; <i64> [#uses=1]
+ %V = add i64 ptrtoint (ptr @X to i64), %C ; <i64> [#uses=1]
ret i64 %V
}
; END.
target datalayout = "e-p:32:32"
-@silly = external constant i32 ; <i32*> [#uses=1]
+@silly = external constant i32 ; <ptr> [#uses=1]
-declare void @bzero(i8*, i32)
+declare void @bzero(ptr, i32)
-declare void @bcopy(i8*, i8*, i32)
+declare void @bcopy(ptr, ptr, i32)
-declare i32 @bcmp(i8*, i8*, i32)
+declare i32 @bcmp(ptr, ptr, i32)
-declare i32 @fputs(i8*, i8*)
+declare i32 @fputs(ptr, ptr)
-declare i32 @fputs_unlocked(i8*, i8*)
+declare i32 @fputs_unlocked(ptr, ptr)
define i32 @function(i32 %a.1) {
entry:
- %a.0 = alloca i32 ; <i32*> [#uses=2]
- %result = alloca i32 ; <i32*> [#uses=2]
- store i32 %a.1, i32* %a.0
- %tmp.0 = load i32, i32* %a.0 ; <i32> [#uses=1]
- %tmp.1 = load i32, i32* @silly ; <i32> [#uses=1]
+ %a.0 = alloca i32 ; <ptr> [#uses=2]
+ %result = alloca i32 ; <ptr> [#uses=2]
+ store i32 %a.1, ptr %a.0
+ %tmp.0 = load i32, ptr %a.0 ; <i32> [#uses=1]
+ %tmp.1 = load i32, ptr @silly ; <i32> [#uses=1]
%tmp.2 = add i32 %tmp.0, %tmp.1 ; <i32> [#uses=1]
- store i32 %tmp.2, i32* %result
+ store i32 %tmp.2, ptr %result
br label %return
return: ; preds = %entry
- %tmp.3 = load i32, i32* %result ; <i32> [#uses=1]
+ %tmp.3 = load i32, ptr %result ; <i32> [#uses=1]
ret i32 %tmp.3
}
define i32 @oof() {
entry:
- %live_head = alloca i32 ; <i32*> [#uses=2]
- %tmp.1 = icmp ne i32* %live_head, null ; <i1> [#uses=1]
+ %live_head = alloca i32 ; <ptr> [#uses=2]
+ %tmp.1 = icmp ne ptr %live_head, null ; <i1> [#uses=1]
br i1 %tmp.1, label %then, label %UnifiedExitNode
then: ; preds = %entry
- %tmp.4 = call i32 (...) @bitmap_clear( i32* %live_head ) ; <i32> [#uses=0]
+ %tmp.4 = call i32 (...) @bitmap_clear( ptr %live_head ) ; <i32> [#uses=0]
br label %UnifiedExitNode
UnifiedExitNode: ; preds = %then, %entry
; RUN: opt < %s -passes=instcombine -S | grep load
-define void @test(i32* %P) {
+define void @test(ptr %P) {
; Dead but not deletable!
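; (LLVM may never delete a volatile access, even one whose result is unused,
; which is what the 'grep load' RUN line checks.)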
- %X = load volatile i32, i32* %P ; <i32> [#uses=0]
+ %X = load volatile i32, ptr %P ; <i32> [#uses=0]
ret void
}
; RUN: opt < %s -passes=instcombine -disable-output
-declare i32* @bar()
+declare ptr @bar()
-define float* @foo() personality i32 (...)* @__gxx_personality_v0 {
- %tmp.11 = invoke float* bitcast (i32* ()* @bar to float* ()*)( )
- to label %invoke_cont unwind label %X ; <float*> [#uses=1]
+define ptr @foo() personality ptr @__gxx_personality_v0 {
+ %tmp.11 = invoke ptr @bar( )
+ to label %invoke_cont unwind label %X ; <ptr> [#uses=1]
invoke_cont: ; preds = %0
- ret float* %tmp.11
+ ret ptr %tmp.11
X: ; preds = %0
- %exn = landingpad {i8*, i32}
+ %exn = landingpad {ptr, i32}
cleanup
- ret float* null
+ ret ptr null
}
declare i32 @__gxx_personality_v0(...)
declare void @foo(...)
define void @test(i64 %X) {
- %Y = inttoptr i64 %X to i32* ; <i32*> [#uses=1]
- call void (...) @foo( i32* %Y )
+ %Y = inttoptr i64 %X to ptr ; <ptr> [#uses=1]
+ call void (...) @foo( ptr %Y )
ret void
}
;
; RUN: opt < %s -passes=instcombine -disable-output
-declare i8* @test()
+declare ptr @test()
-define i32 @foo() personality i32 (...)* @__gxx_personality_v0 {
+define i32 @foo() personality ptr @__gxx_personality_v0 {
entry:
br i1 true, label %cont, label %call
call: ; preds = %entry
- %P = invoke i32* bitcast (i8* ()* @test to i32* ()*)( )
- to label %cont unwind label %N ; <i32*> [#uses=1]
+ %P = invoke ptr @test( )
+ to label %cont unwind label %N ; <ptr> [#uses=1]
cont: ; preds = %call, %entry
- %P2 = phi i32* [ %P, %call ], [ null, %entry ] ; <i32*> [#uses=1]
- %V = load i32, i32* %P2 ; <i32> [#uses=1]
+ %P2 = phi ptr [ %P, %call ], [ null, %entry ] ; <ptr> [#uses=1]
+ %V = load i32, ptr %P2 ; <i32> [#uses=1]
ret i32 %V
N: ; preds = %call
- %exn = landingpad {i8*, i32}
+ %exn = landingpad {ptr, i32}
cleanup
ret i32 0
}
%Ty = type opaque
-define i32 @test(%Ty* %X) {
- %Y = bitcast %Ty* %X to i32* ; <i32*> [#uses=1]
- %Z = load i32, i32* %Y ; <i32> [#uses=1]
+define i32 @test(ptr %X) {
+ %Z = load i32, ptr %X ; <i32> [#uses=1]
ret i32 %Z
}
; RUN: opt < %s -passes=instcombine -disable-output
-@p = weak global i32 0 ; <i32*> [#uses=1]
+@p = weak global i32 0 ; <ptr> [#uses=1]
define i32 @test(i32 %x) {
- %y = mul i32 %x, ptrtoint (i32* @p to i32) ; <i32> [#uses=1]
+ %y = mul i32 %x, ptrtoint (ptr @p to i32) ; <i32> [#uses=1]
ret i32 %y
}
; A naive forward of the stored values through the select would miss the later
; 'store i32 3, ptr %X' and so could produce incorrect results!
define i32 @test(i1 %C) {
- %X = alloca i32 ; <i32*> [#uses=3]
- %X2 = alloca i32 ; <i32*> [#uses=2]
- store i32 1, i32* %X
- store i32 2, i32* %X2
- %Y = select i1 %C, i32* %X, i32* %X2 ; <i32*> [#uses=1]
- store i32 3, i32* %X
- %Z = load i32, i32* %Y ; <i32> [#uses=1]
+ %X = alloca i32 ; <ptr> [#uses=3]
+ %X2 = alloca i32 ; <ptr> [#uses=2]
+ store i32 1, ptr %X
+ store i32 2, ptr %X2
+ %Y = select i1 %C, ptr %X, ptr %X2 ; <ptr> [#uses=1]
+ store i32 3, ptr %X
+ %Z = load i32, ptr %Y ; <i32> [#uses=1]
ret i32 %Z
}
define i32 @test(i1 %C) {
entry:
- %X = alloca i32 ; <i32*> [#uses=3]
- %X2 = alloca i32 ; <i32*> [#uses=2]
- store i32 1, i32* %X
- store i32 2, i32* %X2
+ %X = alloca i32 ; <ptr> [#uses=3]
+ %X2 = alloca i32 ; <ptr> [#uses=2]
+ store i32 1, ptr %X
+ store i32 2, ptr %X2
br i1 %C, label %cond_true.i, label %cond_continue.i
cond_true.i: ; preds = %entry
br label %cond_continue.i
cond_continue.i: ; preds = %cond_true.i, %entry
- %mem_tmp.i.0 = phi i32* [ %X, %cond_true.i ], [ %X2, %entry ] ; <i32*> [#uses=1]
- store i32 3, i32* %X
- %tmp.3 = load i32, i32* %mem_tmp.i.0 ; <i32> [#uses=1]
+ %mem_tmp.i.0 = phi ptr [ %X, %cond_true.i ], [ %X2, %entry ] ; <ptr> [#uses=1]
+ store i32 3, ptr %X
+ %tmp.3 = load i32, ptr %mem_tmp.i.0 ; <i32> [#uses=1]
ret i32 %tmp.3
}
; PR28011 - https://llvm.org/bugs/show_bug.cgi?id=28011
; The above transform only applies to scalar integers; it shouldn't be attempted for constant expressions or vectors.
-@a = common global i32** null
+@a = common global ptr null
@b = common global [1 x i32] zeroinitializer
define i1 @PR28011(i16 %a) {
; CHECK-LABEL: @PR28011(
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[A:%.*]] to i32
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[CONV]], or (i32 zext (i1 icmp ne (i32*** bitcast ([1 x i32]* @b to i32***), i32*** @a) to i32), i32 1)
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[CONV]], or (i32 zext (i1 icmp ne (ptr @b, ptr @a) to i32), i32 1)
; CHECK-NEXT: ret i1 [[CMP]]
;
%conv = sext i16 %a to i32
- %cmp = icmp ne i32 %conv, or (i32 zext (i1 icmp ne (i32*** bitcast ([1 x i32]* @b to i32***), i32*** @a) to i32), i32 1)
+ %cmp = icmp ne i32 %conv, or (i32 zext (i1 icmp ne (ptr @b, ptr @a) to i32), i32 1)
ret i1 %cmp
}
; This example caused instcombine to spin into an infinite loop.
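; (Likely cause: %X is a single-entry phi of %Y while %Y divides %X, so
; repeatedly folding the phi in this unreachable block never reached a fixed point.)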
-define void @test(i32* %P) {
+define void @test(ptr %P) {
ret void
Dead: ; preds = %Dead
%X = phi i32 [ %Y, %Dead ] ; <i32> [#uses=1]
%Y = sdiv i32 %X, 10 ; <i32> [#uses=2]
- store i32 %Y, i32* %P
+ store i32 %Y, ptr %P
br label %Dead
}
; RUN: opt < %s -passes=instcombine -S | grep and
; PR913
-define i32 @test(i32* %tmp1) {
- %tmp.i = load i32, i32* %tmp1 ; <i32> [#uses=1]
+define i32 @test(ptr %tmp1) {
+ %tmp.i = load i32, ptr %tmp1 ; <i32> [#uses=1]
%tmp = bitcast i32 %tmp.i to i32 ; <i32> [#uses=1]
%tmp2.ui = lshr i32 %tmp, 5 ; <i32> [#uses=1]
%tmp2 = bitcast i32 %tmp2.ui to i32 ; <i32> [#uses=1]
define i32 @visible(i32 %direction, i64 %p1.0, i64 %p2.0, i64 %p3.0) {
entry:
- %p1_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
- %p2_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
- %p3_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
+ %p1_addr = alloca %struct.point ; <ptr> [#uses=2]
+ %p2_addr = alloca %struct.point ; <ptr> [#uses=2]
+ %p3_addr = alloca %struct.point ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp = bitcast %struct.point* %p1_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp.upgrd.1 = getelementptr { i64 }, { i64 }* %tmp, i64 0, i32 0 ; <i64*> [#uses=1]
- store i64 %p1.0, i64* %tmp.upgrd.1
- %tmp1 = bitcast %struct.point* %p2_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp2 = getelementptr { i64 }, { i64 }* %tmp1, i64 0, i32 0 ; <i64*> [#uses=1]
- store i64 %p2.0, i64* %tmp2
- %tmp3 = bitcast %struct.point* %p3_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp4 = getelementptr { i64 }, { i64 }* %tmp3, i64 0, i32 0 ; <i64*> [#uses=1]
- store i64 %p3.0, i64* %tmp4
+ %tmp.upgrd.1 = getelementptr { i64 }, ptr %p1_addr, i64 0, i32 0 ; <ptr> [#uses=1]
+ store i64 %p1.0, ptr %tmp.upgrd.1
+ %tmp2 = getelementptr { i64 }, ptr %p2_addr, i64 0, i32 0 ; <ptr> [#uses=1]
+ store i64 %p2.0, ptr %tmp2
+ %tmp4 = getelementptr { i64 }, ptr %p3_addr, i64 0, i32 0 ; <ptr> [#uses=1]
+ store i64 %p3.0, ptr %tmp4
%tmp.upgrd.2 = icmp eq i32 %direction, 0 ; <i1> [#uses=1]
- %tmp5 = bitcast %struct.point* %p1_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp6 = getelementptr { i64 }, { i64 }* %tmp5, i64 0, i32 0 ; <i64*> [#uses=1]
- %tmp.upgrd.3 = load i64, i64* %tmp6 ; <i64> [#uses=1]
- %tmp7 = bitcast %struct.point* %p2_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp8 = getelementptr { i64 }, { i64 }* %tmp7, i64 0, i32 0 ; <i64*> [#uses=1]
- %tmp9 = load i64, i64* %tmp8 ; <i64> [#uses=1]
- %tmp10 = bitcast %struct.point* %p3_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp11 = getelementptr { i64 }, { i64 }* %tmp10, i64 0, i32 0 ; <i64*> [#uses=1]
- %tmp12 = load i64, i64* %tmp11 ; <i64> [#uses=1]
+ %tmp6 = getelementptr { i64 }, ptr %p1_addr, i64 0, i32 0 ; <ptr> [#uses=1]
+ %tmp.upgrd.3 = load i64, ptr %tmp6 ; <i64> [#uses=1]
+ %tmp8 = getelementptr { i64 }, ptr %p2_addr, i64 0, i32 0 ; <ptr> [#uses=1]
+ %tmp9 = load i64, ptr %tmp8 ; <i64> [#uses=1]
+ %tmp11 = getelementptr { i64 }, ptr %p3_addr, i64 0, i32 0 ; <ptr> [#uses=1]
+ %tmp12 = load i64, ptr %tmp11 ; <i64> [#uses=1]
%tmp13 = call i32 @determinant( i64 %tmp.upgrd.3, i64 %tmp9, i64 %tmp12 ) ; <i32> [#uses=2]
br i1 %tmp.upgrd.2, label %cond_true, label %cond_false
define i32 @visible(i32 %direction, i64 %p1.0, i64 %p2.0, i64 %p3.0) {
entry:
- %p1_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
- %p2_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
- %p3_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
- %tmp = bitcast %struct.point* %p1_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp.upgrd.1 = getelementptr { i64 }, { i64 }* %tmp, i32 0, i32 0 ; <i64*> [#uses=1]
- store i64 %p1.0, i64* %tmp.upgrd.1
- %tmp1 = bitcast %struct.point* %p2_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp2 = getelementptr { i64 }, { i64 }* %tmp1, i32 0, i32 0 ; <i64*> [#uses=1]
- store i64 %p2.0, i64* %tmp2
- %tmp3 = bitcast %struct.point* %p3_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp4 = getelementptr { i64 }, { i64 }* %tmp3, i32 0, i32 0 ; <i64*> [#uses=1]
- store i64 %p3.0, i64* %tmp4
+ %p1_addr = alloca %struct.point ; <ptr> [#uses=2]
+ %p2_addr = alloca %struct.point ; <ptr> [#uses=2]
+ %p3_addr = alloca %struct.point ; <ptr> [#uses=2]
+ %tmp.upgrd.1 = getelementptr { i64 }, ptr %p1_addr, i32 0, i32 0 ; <ptr> [#uses=1]
+ store i64 %p1.0, ptr %tmp.upgrd.1
+ %tmp2 = getelementptr { i64 }, ptr %p2_addr, i32 0, i32 0 ; <ptr> [#uses=1]
+ store i64 %p2.0, ptr %tmp2
+ %tmp4 = getelementptr { i64 }, ptr %p3_addr, i32 0, i32 0 ; <ptr> [#uses=1]
+ store i64 %p3.0, ptr %tmp4
%tmp.upgrd.2 = icmp eq i32 %direction, 0 ; <i1> [#uses=1]
- %tmp5 = bitcast %struct.point* %p1_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp6 = getelementptr { i64 }, { i64 }* %tmp5, i32 0, i32 0 ; <i64*> [#uses=1]
- %tmp.upgrd.3 = load i64, i64* %tmp6 ; <i64> [#uses=1]
- %tmp7 = bitcast %struct.point* %p2_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp8 = getelementptr { i64 }, { i64 }* %tmp7, i32 0, i32 0 ; <i64*> [#uses=1]
- %tmp9 = load i64, i64* %tmp8 ; <i64> [#uses=1]
- %tmp10 = bitcast %struct.point* %p3_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp11 = getelementptr { i64 }, { i64 }* %tmp10, i32 0, i32 0 ; <i64*> [#uses=1]
- %tmp12 = load i64, i64* %tmp11 ; <i64> [#uses=1]
+ %tmp6 = getelementptr { i64 }, ptr %p1_addr, i32 0, i32 0 ; <ptr> [#uses=1]
+ %tmp.upgrd.3 = load i64, ptr %tmp6 ; <i64> [#uses=1]
+ %tmp8 = getelementptr { i64 }, ptr %p2_addr, i32 0, i32 0 ; <ptr> [#uses=1]
+ %tmp9 = load i64, ptr %tmp8 ; <i64> [#uses=1]
+ %tmp11 = getelementptr { i64 }, ptr %p3_addr, i32 0, i32 0 ; <ptr> [#uses=1]
+ %tmp12 = load i64, ptr %tmp11 ; <i64> [#uses=1]
%tmp13 = call i32 @determinant( i64 %tmp.upgrd.3, i64 %tmp9, i64 %tmp12 ) ; <i32> [#uses=2]
%tmp14 = icmp slt i32 %tmp13, 0 ; <i1> [#uses=1]
%tmp26 = icmp sgt i32 %tmp13, 0 ; <i1> [#uses=1]
target datalayout = "e-p:32:32"
target triple = "i686-pc-linux-gnu"
-@r = external global [17 x i32] ; <[17 x i32]*> [#uses=1]
+@r = external global [17 x i32] ; <ptr> [#uses=1]
-define i1 @print_pgm_cond_true(i32 %tmp12.reload, i32* %tmp16.out) {
+define i1 @print_pgm_cond_true(i32 %tmp12.reload, ptr %tmp16.out) {
; CHECK-LABEL: @print_pgm_cond_true(
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[COND_TRUE:%.*]]
; CHECK: bb27.exitStub:
-; CHECK-NEXT: store i32 [[TMP16:%.*]], i32* [[TMP16_OUT:%.*]], align 4
+; CHECK-NEXT: store i32 [[TMP16:%.*]], ptr [[TMP16_OUT:%.*]], align 4
; CHECK-NEXT: ret i1 true
; CHECK: cond_next23.exitStub:
-; CHECK-NEXT: store i32 [[TMP16]], i32* [[TMP16_OUT]], align 4
+; CHECK-NEXT: store i32 [[TMP16]], ptr [[TMP16_OUT]], align 4
; CHECK-NEXT: ret i1 false
; CHECK: cond_true:
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr [17 x i32], [17 x i32]* @r, i32 0, i32 [[TMP12_RELOAD:%.*]]
-; CHECK-NEXT: [[TMP16]] = load i32, i32* [[TMP15]], align 4
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr [17 x i32], ptr @r, i32 0, i32 [[TMP12_RELOAD:%.*]]
+; CHECK-NEXT: [[TMP16]] = load i32, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[TMP16]], -32
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[TMP0]], -63
; CHECK-NEXT: br i1 [[TMP1]], label [[BB27_EXITSTUB:%.*]], label [[COND_NEXT23_EXITSTUB:%.*]]
newFuncRoot:
  br label %cond_true
bb27.exitStub: ; preds = %cond_true
- store i32 %tmp16, i32* %tmp16.out
+ store i32 %tmp16, ptr %tmp16.out
ret i1 true
cond_next23.exitStub: ; preds = %cond_true
- store i32 %tmp16, i32* %tmp16.out
+ store i32 %tmp16, ptr %tmp16.out
ret i1 false
cond_true: ; preds = %newFuncRoot
- %tmp15 = getelementptr [17 x i32], [17 x i32]* @r, i32 0, i32 %tmp12.reload ; <i32*> [#uses=1]
- %tmp16 = load i32, i32* %tmp15 ; <i32> [#uses=4]
+ %tmp15 = getelementptr [17 x i32], ptr @r, i32 0, i32 %tmp12.reload ; <ptr> [#uses=1]
+ %tmp16 = load i32, ptr %tmp15 ; <i32> [#uses=4]
%tmp18 = icmp slt i32 %tmp16, -31 ; <i1> [#uses=1]
%tmp21 = icmp sgt i32 %tmp16, 31 ; <i1> [#uses=1]
%bothcond = or i1 %tmp18, %tmp21 ; <i1> [#uses=1]
br i1 %bothcond, label %bb27.exitStub, label %cond_next23.exitStub
}
-define i1 @print_pgm_cond_true_logical(i32 %tmp12.reload, i32* %tmp16.out) {
+define i1 @print_pgm_cond_true_logical(i32 %tmp12.reload, ptr %tmp16.out) {
; CHECK-LABEL: @print_pgm_cond_true_logical(
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[COND_TRUE:%.*]]
; CHECK: bb27.exitStub:
-; CHECK-NEXT: store i32 [[TMP16:%.*]], i32* [[TMP16_OUT:%.*]], align 4
+; CHECK-NEXT: store i32 [[TMP16:%.*]], ptr [[TMP16_OUT:%.*]], align 4
; CHECK-NEXT: ret i1 true
; CHECK: cond_next23.exitStub:
-; CHECK-NEXT: store i32 [[TMP16]], i32* [[TMP16_OUT]], align 4
+; CHECK-NEXT: store i32 [[TMP16]], ptr [[TMP16_OUT]], align 4
; CHECK-NEXT: ret i1 false
; CHECK: cond_true:
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr [17 x i32], [17 x i32]* @r, i32 0, i32 [[TMP12_RELOAD:%.*]]
-; CHECK-NEXT: [[TMP16]] = load i32, i32* [[TMP15]], align 4
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr [17 x i32], ptr @r, i32 0, i32 [[TMP12_RELOAD:%.*]]
+; CHECK-NEXT: [[TMP16]] = load i32, ptr [[TMP15]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[TMP16]], -32
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[TMP0]], -63
; CHECK-NEXT: br i1 [[TMP1]], label [[BB27_EXITSTUB:%.*]], label [[COND_NEXT23_EXITSTUB:%.*]]
newFuncRoot:
  br label %cond_true
bb27.exitStub: ; preds = %cond_true
- store i32 %tmp16, i32* %tmp16.out
+ store i32 %tmp16, ptr %tmp16.out
ret i1 true
cond_next23.exitStub: ; preds = %cond_true
- store i32 %tmp16, i32* %tmp16.out
+ store i32 %tmp16, ptr %tmp16.out
ret i1 false
cond_true: ; preds = %newFuncRoot
- %tmp15 = getelementptr [17 x i32], [17 x i32]* @r, i32 0, i32 %tmp12.reload ; <i32*> [#uses=1]
- %tmp16 = load i32, i32* %tmp15 ; <i32> [#uses=4]
+ %tmp15 = getelementptr [17 x i32], ptr @r, i32 0, i32 %tmp12.reload ; <ptr> [#uses=1]
+ %tmp16 = load i32, ptr %tmp15 ; <i32> [#uses=4]
%tmp18 = icmp slt i32 %tmp16, -31 ; <i1> [#uses=1]
%tmp21 = icmp sgt i32 %tmp16, 31 ; <i1> [#uses=1]
 %bothcond = select i1 %tmp18, i1 true, i1 %tmp21 ; <i1> [#uses=1]
 br i1 %bothcond, label %bb27.exitStub, label %cond_next23.exitStub
}
target datalayout = "e-p:32:32"
target triple = "i686-pc-linux-gnu"
%struct.internal_state = type { i32 }
- %struct.mng_data = type { i32, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i32, i32, i32, i8, i32, i32, i32, i32, i16, i16, i16, i8, i8, double, double, double, i8, i8, i8, i8, i32, i32, i32, i32, i32, i8, i32, i32, i8*, i8* (i32)*, void (i8*, i32)*, void (i8*, i8*, i32)*, i8 (%struct.mng_data*)*, i8 (%struct.mng_data*)*, i8 (%struct.mng_data*, i8*, i32, i32*)*, i8 (%struct.mng_data*, i8*, i32, i32*)*, i8 (%struct.mng_data*, i32, i8, i32, i32, i32, i32, i8*)*, i8 (%struct.mng_data*, i32, i32, i8*)*, i8 (%struct.mng_data*, i32, i32)*, i8 (%struct.mng_data*, i8, i8*, i8*, i8*, i8*)*, i8 (%struct.mng_data*)*, i8 (%struct.mng_data*, i8*)*, i8 (%struct.mng_data*, i8*)*, i8 (%struct.mng_data*, i32, i32)*, i8 (%struct.mng_data*, i32, i32, i8*)*, i8 (%struct.mng_data*, i8, i8, i32, i32)*, i8* (%struct.mng_data*, i32)*, i8* (%struct.mng_data*, i32)*, i8* (%struct.mng_data*, i32)*, i8 (%struct.mng_data*, i32, i32, i32, i32)*, i32 (%struct.mng_data*)*, i8 (%struct.mng_data*, i32)*, i8 (%struct.mng_data*, i32)*, i8 (%struct.mng_data*, i32, i32, i32, i32, i32, i32, i32, i32)*, i8 (%struct.mng_data*, i8)*, i8 (%struct.mng_data*, i32, i8*)*, i8 (%struct.mng_data*, i32, i8, i8*)*, i8, i32, i32, i8*, i8*, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, i32, i8, i8, i8, i8, i8, i32, i8, i8, i8, i32, i8*, i32, i8*, i32, i8, i8, i8, i32, i8*, i8*, i32, i32, i8*, i8*, %struct.mng_pushdata*, %struct.mng_pushdata*, %struct.mng_pushdata*, %struct.mng_pushdata*, i8, i8, i32, i32, i8*, i8, i8, i32, i32, i32, i32, i32, i32, i8, i8, i8, i8, i32, i32, i8*, i32, i32, i32, i8, i8, i32, i32, i32, i32, i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, i8*, i8*, i8*, i32, i8*, i8*, i8*, i8*, i8*, %struct.mng_savedata*, i32, i32, i32, i32, i8, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, i8*, i8*, i8, i8, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, i8*, i8*, i8*, i8*, i8*, [256 x i8], double, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, i16, i8, i8, i8, i8, i8, i32, i32, i8, i32, i32, i32, i32, i16, i16, i16, i8, i16, i8, i32, i32, i32, i32, i8, i32, i32, i8, i32, i32, i32, i32, i8, i32, i32, i8, i32, i32, i32, i32, i32, i8, i32, i8, i16, i16, i16, i16, i32, [256 x %struct.mng_palette8e], i32, [256 x i8], i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i32, i8*, i16, i16, i16, i8*, i8, i8, i32, i32, i32, i32, i8, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, i8*, i8, i8, i8, i32, i8*, i8*, i16, i16, i16, i16, i32, i32, i8*, %struct.z_stream, i32, i32, i32, i32, i32, i32, i8, i8, [256 x i32], i8 }
+ %struct.mng_data = type { i32, ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i32, i32, i32, i8, i32, i32, i32, i32, i16, i16, i16, i8, i8, double, double, double, i8, i8, i8, i8, i32, i32, i32, i32, i32, i8, i32, i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i8, i32, i32, ptr, ptr, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, i32, i8, i8, i8, i8, i8, i32, i8, i8, i8, i32, ptr, i32, ptr, i32, i8, i8, i8, i32, ptr, ptr, i32, i32, ptr, ptr, ptr, ptr, ptr, ptr, i8, i8, i32, i32, ptr, i8, i8, i32, i32, i32, i32, i32, i32, i8, i8, i8, i8, i32, i32, ptr, i32, i32, i32, i8, i8, i32, i32, i32, i32, i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, ptr, ptr, ptr, i32, ptr, ptr, ptr, ptr, ptr, ptr, i32, i32, i32, i32, i8, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, ptr, ptr, i8, i8, i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, ptr, ptr, ptr, ptr, ptr, [256 x i8], double, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i16, i8, i8, i8, i8, i8, i32, i32, i8, i32, i32, i32, i32, i16, i16, i16, i8, i16, i8, i32, i32, i32, i32, i8, i32, i32, i8, i32, i32, i32, i32, i8, i32, i32, i8, i32, i32, i32, i32, i32, i8, i32, i8, i16, i16, i16, i16, i32, [256 x %struct.mng_palette8e], i32, [256 x i8], i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i32, ptr, i16, i16, i16, ptr, i8, i8, i32, i32, i32, i32, i8, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i8, i8, i8, i32, ptr, ptr, i16, i16, i16, i16, i32, i32, ptr, %struct.z_stream, i32, i32, i32, i32, i32, i32, i8, i8, [256 x i32], i8 }
%struct.mng_palette8e = type { i8, i8, i8 }
- %struct.mng_pushdata = type { i8*, i8*, i32, i8, i8*, i32 }
- %struct.mng_savedata = type { i8, i8, i8, i8, i8, i8, i8, i16, i16, i16, i8, i16, i8, i8, i32, i32, i8, i32, i32, i32, i32, i32, [256 x %struct.mng_palette8e], i32, [256 x i8], i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i32, i8*, i16, i16, i16 }
- %struct.z_stream = type { i8*, i32, i32, i8*, i32, i32, i8*, %struct.internal_state*, i8* (i8*, i32, i32)*, void (i8*, i8*)*, i8*, i32, i32, i32 }
+ %struct.mng_pushdata = type { ptr, ptr, i32, i8, ptr, i32 }
+ %struct.mng_savedata = type { i8, i8, i8, i8, i8, i8, i8, i16, i16, i16, i8, i16, i8, i8, i32, i32, i8, i32, i32, i32, i32, i32, [256 x %struct.mng_palette8e], i32, [256 x i8], i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i32, ptr, i16, i16, i16 }
+ %struct.z_stream = type { ptr, i32, i32, ptr, i32, i32, ptr, ptr, ptr, ptr, ptr, i32, i32, i32 }
-define void @mng_write_basi(i8* %src1, i16* %src2) {
+define void @mng_write_basi(ptr %src1, ptr %src2) {
entry:
- %tmp = load i8, i8* %src1 ; <i8> [#uses=1]
+ %tmp = load i8, ptr %src1 ; <i8> [#uses=1]
%tmp.upgrd.1 = icmp ugt i8 %tmp, 8 ; <i1> [#uses=1]
- %tmp.upgrd.2 = load i16, i16* %src2; <i16> [#uses=2]
+ %tmp.upgrd.2 = load i16, ptr %src2; <i16> [#uses=2]
%tmp3 = icmp eq i16 %tmp.upgrd.2, 255 ; <i1> [#uses=1]
%tmp7 = icmp eq i16 %tmp.upgrd.2, -1 ; <i1> [#uses=1]
%bOpaque.0.in = select i1 %tmp.upgrd.1, i1 %tmp7, i1 %tmp3 ; <i1> [#uses=1]
define i32 @test2(i32 %C) {
entry:
%A = alloca i32
%B = alloca i32
- %tmp = call i32 (...) @bar( i32* %A ) ; <i32> [#uses=0]
- %T = load i32, i32* %A ; <i32> [#uses=1]
+ %tmp = call i32 (...) @bar( ptr %A ) ; <i32> [#uses=0]
+ %T = load i32, ptr %A ; <i32> [#uses=1]
%tmp2 = icmp eq i32 %C, 0 ; <i1> [#uses=1]
br i1 %tmp2, label %cond_next, label %cond_true
cond_true: ; preds = %entry
- store i32 123, i32* %B
+ store i32 123, ptr %B
call i32 @test2( i32 123 ) ; <i32>:0 [#uses=0]
- %T1 = load i32, i32* %B ; <i32> [#uses=1]
+ %T1 = load i32, ptr %B ; <i32> [#uses=1]
br label %cond_next
cond_next: ; preds = %cond_true, %entry
; compile a kernel though...
target datalayout = "e-p:32:32"
-@str = internal constant [6 x i8] c"%llx\0A\00" ; <[6 x i8]*> [#uses=1]
+@str = internal constant [6 x i8] c"%llx\0A\00" ; <ptr> [#uses=1]
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
-define i32 @main(i32 %x, i8** %a) {
+define i32 @main(i32 %x, ptr %a) {
entry:
- %tmp = getelementptr [6 x i8], [6 x i8]* @str, i32 0, i64 0 ; <i8*> [#uses=1]
- %tmp1 = load i8*, i8** %a ; <i8*> [#uses=1]
- %tmp2 = ptrtoint i8* %tmp1 to i32 ; <i32> [#uses=1]
+ %tmp = getelementptr [6 x i8], ptr @str, i32 0, i64 0 ; <ptr> [#uses=1]
+ %tmp1 = load ptr, ptr %a ; <ptr> [#uses=1]
+ %tmp2 = ptrtoint ptr %tmp1 to i32 ; <i32> [#uses=1]
%tmp3 = zext i32 %tmp2 to i64 ; <i64> [#uses=1]
- %tmp.upgrd.1 = call i32 (i8*, ...) @printf( i8* %tmp, i64 %tmp3 ) ; <i32> [#uses=0]
+ %tmp.upgrd.1 = call i32 (ptr, ...) @printf( ptr %tmp, i64 %tmp3 ) ; <i32> [#uses=0]
ret i32 0
}
target datalayout = "e-p:32:32"
target triple = "i686-pc-linux-gnu"
- %struct.termbox = type { %struct.termbox*, i32, i32, i32, i32, i32 }
+ %struct.termbox = type { ptr, i32, i32, i32, i32, i32 }
define void @ggenorien() {
entry:
- %tmp68 = icmp eq %struct.termbox* null, null ; <i1> [#uses=1]
+ %tmp68 = icmp eq ptr null, null ; <i1> [#uses=1]
br i1 %tmp68, label %cond_next448, label %bb80
bb80: ; preds = %entry
; CHECK-LABEL: @main(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[U:%.*]] = alloca %struct..1anon, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds %struct..1anon, %struct..1anon* [[U]], i64 0, i32 0
-; CHECK-NEXT: store double 0x7FF0000000000000, double* [[TMP1]], align 8
-; CHECK-NEXT: [[TMP34:%.*]] = bitcast %struct..1anon* [[U]] to %struct..0anon*
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds %struct..0anon, %struct..0anon* [[TMP34]], i64 0, i32 1
-; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
+; CHECK-NEXT: store double 0x7FF0000000000000, ptr [[U]], align 8
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds %struct..0anon, ptr [[U]], i64 0, i32 1
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP89:%.*]] = and i32 [[TMP6]], 2146435072
; CHECK-NEXT: [[TMP0:%.*]] = icmp eq i32 [[TMP89]], 2146435072
; CHECK-NEXT: br i1 [[TMP0]], label %cond_false, label %cond_true
;
define i32 @main() {
entry:
%u = alloca %struct..1anon, align 8
- %tmp1 = getelementptr %struct..1anon, %struct..1anon* %u, i32 0, i32 0
- store double 0x7FF0000000000000, double* %tmp1
- %tmp3 = getelementptr %struct..1anon, %struct..1anon* %u, i32 0, i32 0
- %tmp34 = bitcast double* %tmp3 to %struct..0anon*
- %tmp5 = getelementptr %struct..0anon, %struct..0anon* %tmp34, i32 0, i32 1
- %tmp6 = load i32, i32* %tmp5
+ store double 0x7FF0000000000000, ptr %u
+ %tmp5 = getelementptr %struct..0anon, ptr %u, i32 0, i32 1
+ %tmp6 = load i32, ptr %tmp5
%tmp7 = shl i32 %tmp6, 1
%tmp8 = lshr i32 %tmp7, 21
%tmp89 = trunc i32 %tmp8 to i16
target triple = "i686-pc-linux-gnu"
-define i1 @test(i32* %tmp141, i32* %tmp145,
- i32 %b8, i32 %iftmp.430.0, i32* %tmp134.out, i32* %tmp137.out)
+define i1 @test(ptr %tmp141, ptr %tmp145,
+ i32 %b8, i32 %iftmp.430.0, ptr %tmp134.out, ptr %tmp137.out)
{
newFuncRoot:
%tmp133 = and i32 %b8, 1 ; <i32> [#uses=1]
%tmp136 = ashr i32 %b8, 1 ; <i32> [#uses=1]
%tmp137 = shl i32 %tmp136, 3 ; <i32> [#uses=3]
%tmp139 = ashr i32 %tmp134, 2 ; <i32> [#uses=1]
- store i32 %tmp139, i32* %tmp141
+ store i32 %tmp139, ptr %tmp141
%tmp143 = ashr i32 %tmp137, 2 ; <i32> [#uses=1]
- store i32 %tmp143, i32* %tmp145
+ store i32 %tmp143, ptr %tmp145
icmp eq i32 %iftmp.430.0, 0 ; <i1>:0 [#uses=1]
zext i1 %0 to i8 ; <i8>:1 [#uses=1]
icmp ne i8 %1, 0 ; <i1>:2 [#uses=1]
br i1 %2, label %cond_true147.exitStub, label %cond_false252.exitStub
cond_true147.exitStub: ; preds = %newFuncRoot
- store i32 %tmp134, i32* %tmp134.out
- store i32 %tmp137, i32* %tmp137.out
+ store i32 %tmp134, ptr %tmp134.out
+ store i32 %tmp137, ptr %tmp137.out
ret i1 true
cond_false252.exitStub: ; preds = %newFuncRoot
- store i32 %tmp134, i32* %tmp134.out
- store i32 %tmp137, i32* %tmp137.out
+ store i32 %tmp134, ptr %tmp134.out
+ store i32 %tmp137, ptr %tmp137.out
ret i1 false
}
target triple = "powerpc-unknown-linux-gnu"
%struct.abc = type { i32, [32 x i8] }
-%struct.def = type { i8**, %struct.abc }
+%struct.def = type { ptr, %struct.abc }
%struct.anon = type <{ }>
-define i8* @foo(%struct.anon* %deviceRef, %struct.abc* %pCap) {
+define ptr @foo(ptr %deviceRef, ptr %pCap) {
entry:
- %tmp1 = bitcast %struct.anon* %deviceRef to %struct.def*
- %tmp3 = getelementptr %struct.def, %struct.def* %tmp1, i32 0, i32 1
- %tmp35 = bitcast %struct.abc* %tmp3 to i8*
- ret i8* %tmp35
+ %tmp3 = getelementptr %struct.def, ptr %deviceRef, i32 0, i32 1
+ ret ptr %tmp3
}
; RUN: opt < %s -passes=instcombine -S | grep "call.*sret"
; Make sure instcombine doesn't drop the sret attribute.
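; Stripping the bitcast of @objc_msgSend_stret (see below) must preserve the
; sret(i16) attribute on the argument, which the 'call.*sret' grep verifies.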
-define void @blah(i16* %tmp10) {
+define void @blah(ptr %tmp10) {
entry:
- call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend_stret to void (i16*)*)(i16* sret(i16) %tmp10)
+ call void @objc_msgSend_stret(ptr sret(i16) %tmp10)
ret void
}
-declare i8* @objc_msgSend_stret(i8*, i8*, ...)
+declare ptr @objc_msgSend_stret(ptr, ptr, ...)
; RUN: opt < %s -passes=instcombine -S | grep "ashr"
; PR1499
-define void @av_cmp_q_cond_true(i32* %retval, i32* %tmp9, i64* %tmp10) {
+define void @av_cmp_q_cond_true(ptr %retval, ptr %tmp9, ptr %tmp10) {
newFuncRoot:
br label %cond_true
return.exitStub: ; preds = %cond_true
  ret void
cond_true: ; preds = %newFuncRoot
- %tmp30 = load i64, i64* %tmp10 ; <i64> [#uses=1]
+ %tmp30 = load i64, ptr %tmp10 ; <i64> [#uses=1]
%.cast = zext i32 63 to i64 ; <i64> [#uses=1]
%tmp31 = ashr i64 %tmp30, %.cast ; <i64> [#uses=1]
%tmp3132 = trunc i64 %tmp31 to i32 ; <i32> [#uses=1]
%tmp33 = or i32 %tmp3132, 1 ; <i32> [#uses=1]
- store i32 %tmp33, i32* %tmp9
- %tmp34 = load i32, i32* %tmp9 ; <i32> [#uses=1]
- store i32 %tmp34, i32* %retval
+ store i32 %tmp33, ptr %tmp9
+ %tmp34 = load i32, ptr %tmp9 ; <i32> [#uses=1]
+ store i32 %tmp34, ptr %retval
br label %return.exitStub
}
; RUN: opt < %s -passes=instcombine -S | grep icmp
; PR1646
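; @__gthrw_pthread_cancel is only a weak alias, so its address is not provably
; non-null; the icmp below must therefore survive constant folding (hence 'grep icmp').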
-@__gthrw_pthread_cancel = weak alias i32 (i32), i32 (i32)* @pthread_cancel ; <i32 (i32)*> [#uses=1]
-@__gthread_active_ptr.5335 = internal constant i8* bitcast (i32 (i32)* @__gthrw_pthread_cancel to i8*) ; <i8**> [#uses=1]
+@__gthrw_pthread_cancel = weak alias i32 (i32), ptr @pthread_cancel ; <ptr> [#uses=1]
+@__gthread_active_ptr.5335 = internal constant ptr @__gthrw_pthread_cancel ; <ptr> [#uses=1]
define weak i32 @pthread_cancel(i32) {
ret i32 0
}
define i1 @__gthread_active_p() {
entry:
- %tmp1 = load i8*, i8** @__gthread_active_ptr.5335, align 4 ; <i8*> [#uses=1]
- %tmp2 = icmp ne i8* %tmp1, null ; <i1> [#uses=1]
+ %tmp1 = load ptr, ptr @__gthread_active_ptr.5335, align 4 ; <ptr> [#uses=1]
+ %tmp2 = icmp ne ptr %tmp1, null ; <i1> [#uses=1]
ret i1 %tmp2
}
; RUN: opt < %s -passes=instcombine -S | grep icmp
; PR1678
-@A = weak alias void (), void ()* @B ; <void ()*> [#uses=1]
+@A = weak alias void (), ptr @B ; <ptr> [#uses=1]
define weak void @B() {
ret void
}
define i32 @active() {
entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp1 = icmp ne void ()* @A, null ; <i1> [#uses=1]
+ %tmp1 = icmp ne ptr @A, null ; <i1> [#uses=1]
%tmp12 = zext i1 %tmp1 to i32 ; <i32> [#uses=1]
ret i32 %tmp12
}
; RUN: opt < %s -O3 -S | not grep xyz
target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
-@.str = internal constant [4 x i8] c"xyz\00" ; <[4 x i8]*> [#uses=1]
+@.str = internal constant [4 x i8] c"xyz\00" ; <ptr> [#uses=1]
-define void @foo(i8* %P) {
+define void @foo(ptr %P) {
entry:
- %P_addr = alloca i8*
- store i8* %P, i8** %P_addr
- %tmp = load i8*, i8** %P_addr, align 4
- %tmp1 = getelementptr [4 x i8], [4 x i8]* @.str, i32 0, i32 0
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp, i8* %tmp1, i32 4, i1 false)
+ %P_addr = alloca ptr
+ store ptr %P, ptr %P_addr
+ %tmp = load ptr, ptr %P_addr, align 4
+ call void @llvm.memcpy.p0.p0.i32(ptr %tmp, ptr @.str, i32 4, i1 false)
br label %return
return: ; preds = %entry
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
; RUN: opt < %s -passes=instcombine -disable-output
%struct.Ray = type { %struct.Vec, %struct.Vec }
- %struct.Scene = type { i32 (...)** }
+ %struct.Scene = type { ptr }
%struct.Vec = type { double, double, double }
-declare double @_Z9ray_traceRK3VecRK3RayRK5Scene(%struct.Vec*, %struct.Ray*, %struct.Scene*)
+declare double @_Z9ray_traceRK3VecRK3RayRK5Scene(ptr, ptr, ptr)
-define i32 @main(i32 %argc, i8** %argv) {
+define i32 @main(i32 %argc, ptr %argv) {
entry:
- %tmp3 = alloca %struct.Ray, align 4 ; <%struct.Ray*> [#uses=2]
+ %tmp3 = alloca %struct.Ray, align 4 ; <ptr> [#uses=2]
%tmp97 = icmp slt i32 0, 512 ; <i1> [#uses=1]
br i1 %tmp97, label %bb71, label %bb108
bb29: ; preds = %bb62
- %tmp322 = bitcast %struct.Ray* %tmp3 to %struct.Vec* ; <%struct.Vec*> [#uses=1]
- %tmp322.0 = getelementptr %struct.Vec, %struct.Vec* %tmp322, i32 0, i32 0 ; <double*> [#uses=1]
- store double 0.000000e+00, double* %tmp322.0
- %tmp57 = call double @_Z9ray_traceRK3VecRK3RayRK5Scene( %struct.Vec* null, %struct.Ray* %tmp3, %struct.Scene* null ) ; <double> [#uses=0]
+ %tmp322.0 = getelementptr %struct.Vec, ptr %tmp3, i32 0, i32 0 ; <ptr> [#uses=1]
+ store double 0.000000e+00, ptr %tmp322.0
+ %tmp57 = call double @_Z9ray_traceRK3VecRK3RayRK5Scene( ptr null, ptr %tmp3, ptr null ) ; <double> [#uses=0]
br label %bb62
bb62: ; preds = %bb71, %bb29
; PR1745
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i686-apple-darwin8"
-@p = weak global i8* null ; <i8**> [#uses=1]
+@p = weak global ptr null ; <ptr> [#uses=1]
define i32 @main() {
entry:
  br label %lab
lab: ; preds = %cleanup31, %entry
%n.0 = phi i32 [ 0, %entry ], [ %tmp25, %cleanup31 ] ; <i32> [#uses=2]
- %tmp2 = call i8* @llvm.stacksave( ) ; <i8*> [#uses=2]
+ %tmp2 = call ptr @llvm.stacksave( ) ; <ptr> [#uses=2]
%tmp4 = srem i32 %n.0, 47 ; <i32> [#uses=1]
%tmp5 = add i32 %tmp4, 1 ; <i32> [#uses=5]
%tmp7 = sub i32 %tmp5, 1 ; <i32> [#uses=0]
%tmp1314 = zext i32 %tmp5 to i64 ; <i64> [#uses=1]
%tmp15 = mul i64 %tmp1314, 32 ; <i64> [#uses=0]
%tmp17 = mul i32 %tmp5, 4 ; <i32> [#uses=1]
- %tmp18 = alloca i8, i32 %tmp17 ; <i8*> [#uses=1]
- %tmp1819 = bitcast i8* %tmp18 to i32* ; <i32*> [#uses=2]
- %tmp21 = getelementptr i32, i32* %tmp1819, i32 0 ; <i32*> [#uses=1]
- store i32 1, i32* %tmp21, align 4
- %tmp2223 = bitcast i32* %tmp1819 to i8* ; <i8*> [#uses=1]
- store volatile i8* %tmp2223, i8** @p, align 4
+ %tmp18 = alloca i8, i32 %tmp17 ; <ptr> [#uses=1]
+ %tmp21 = getelementptr i32, ptr %tmp18, i32 0 ; <ptr> [#uses=1]
+ store i32 1, ptr %tmp21, align 4
+ store volatile ptr %tmp18, ptr @p, align 4
%tmp25 = add i32 %n.0, 1 ; <i32> [#uses=2]
%tmp27 = icmp sle i32 %tmp25, 999999 ; <i1> [#uses=1]
%tmp2728 = zext i1 %tmp27 to i8 ; <i8> [#uses=1]
  %toBool = icmp ne i8 %tmp2728, 0 ; <i1> [#uses=1]
  br i1 %toBool, label %cleanup31, label %cond_next
cond_next: ; preds = %lab
- call void @llvm.stackrestore( i8* %tmp2 )
+ call void @llvm.stackrestore( ptr %tmp2 )
ret i32 0
cleanup31: ; preds = %lab
- call void @llvm.stackrestore( i8* %tmp2 )
+ call void @llvm.stackrestore( ptr %tmp2 )
br label %lab
}
-declare i8* @llvm.stacksave()
+declare ptr @llvm.stacksave()
-declare void @llvm.stackrestore(i8*)
+declare void @llvm.stackrestore(ptr)
br label %bb51.i.i
bb27.i.i: ; preds = %bb51.i.i
- %tmp31.i.i = load i16, i16* null, align 2 ; <i16> [#uses=2]
+ %tmp31.i.i = load i16, ptr null, align 2 ; <i16> [#uses=2]
%tmp35.i.i = icmp ult i16 %tmp31.i.i, 1 ; <i1> [#uses=1]
%tmp41.i.i = icmp ugt i16 %tmp31.i.i, -1 ; <i1> [#uses=1]
%bothcond.i.i = or i1 %tmp35.i.i, %tmp41.i.i ; <i1> [#uses=1]
declare void @__darwin_gcc3_preregister_frame_info()
-define void @_start(i32 %argc, i8** %argv, i8** %envp) {
+define void @_start(i32 %argc, ptr %argv, ptr %envp) {
entry:
- %tmp1 = bitcast void ()* @__darwin_gcc3_preregister_frame_info to i32* ; <i32*> [#uses=1]
- %tmp2 = load i32, i32* %tmp1, align 4 ; <i32> [#uses=1]
+ %tmp2 = load i32, ptr @__darwin_gcc3_preregister_frame_info, align 4 ; <i32> [#uses=1]
%tmp3 = icmp ne i32 %tmp2, 0 ; <i1> [#uses=1]
%tmp34 = zext i1 %tmp3 to i8 ; <i8> [#uses=1]
%toBool = icmp ne i8 %tmp34, 0 ; <i1> [#uses=1]
define i32 @foo() {
entry:
- %x = load i8, i8* bitcast (%opaque_t* @g to i8*)
- %y = load i32, i32* bitcast (%op_ts* @h to i32*)
+ %x = load i8, ptr @g
+ %y = load i32, ptr @h
%z = zext i8 %x to i32
%r = add i32 %y, %z
ret i32 %r
; RUN: opt < %s -passes=instcombine -S | not grep bitcast
; PR1716
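; With the no-op cast of @printf stripped, this is a direct vararg call with a
; constant format string; 'not grep bitcast' confirms no cast remains.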
-@.str = internal constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=1]
+@.str = internal constant [4 x i8] c"%d\0A\00" ; <ptr> [#uses=1]
-define i32 @main(i32 %argc, i8** %argv) {
+define i32 @main(i32 %argc, ptr %argv) {
entry:
- %tmp32 = tail call i32 (i8* , ...) bitcast (i32 (i8*, ...) * @printf to i32 (i8* , ...) *)( i8* getelementptr ([4 x i8], [4 x i8]* @.str, i32 0, i32 0) , i32 0 ) nounwind ; <i32> [#uses=0]
+ %tmp32 = tail call i32 (ptr , ...) @printf( ptr @.str , i32 0 ) nounwind ; <i32> [#uses=0]
ret i32 undef
}
-declare i32 @printf(i8*, ...) nounwind
+declare i32 @printf(ptr, ...) nounwind
; PR1850
define i1 @test() {
- %cond = icmp ule i8* inttoptr (i64 4294967297 to i8*), inttoptr (i64 5 to i8*)
+ %cond = icmp ule ptr inttoptr (i64 4294967297 to ptr), inttoptr (i64 5 to ptr)
ret i1 %cond
}
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-define i8* @foo([100 x {i8,i8,i8}]* %x) {
+define ptr @foo(ptr %x) {
entry:
- %p = bitcast [100 x {i8,i8,i8}]* %x to i8*
- %q = getelementptr i8, i8* %p, i32 -4
- ret i8* %q
+ %q = getelementptr i8, ptr %x, i32 -4
+ ret ptr %q
}
define i32 @test1() {
entry:
%z = alloca i32
- store i32 0, i32* %z
- %tmp = load i32, i32* %z
+ store i32 0, ptr %z
+ %tmp = load i32, ptr %z
%sub = sub i32 %tmp, 1
%cmp = icmp ule i32 %sub, 0
 %retval = select i1 %cmp, i32 0, i32 1
 ret i32 %retval
}
define i32 @test2() {
entry:
%z = alloca i32
- store i32 0, i32* %z
- %tmp = load i32, i32* %z
+ store i32 0, ptr %z
+ %tmp = load i32, ptr %z
%sub = sub i32 %tmp, 1
%cmp = icmp ugt i32 %sub, 0
 %retval = select i1 %cmp, i32 1, i32 0
 ret i32 %retval
}
define i32 @test3() {
entry:
%z = alloca i32
- store i32 0, i32* %z
- %tmp = load i32, i32* %z
+ store i32 0, ptr %z
+ %tmp = load i32, ptr %z
%sub = sub i32 %tmp, 1
%cmp = icmp slt i32 %sub, 0
 %retval = select i1 %cmp, i32 1, i32 0
 ret i32 %retval
}
define i32 @test4() {
entry:
%z = alloca i32
- store i32 0, i32* %z
- %tmp = load i32, i32* %z
+ store i32 0, ptr %z
+ %tmp = load i32, ptr %z
%sub = sub i32 %tmp, 1
%cmp = icmp sle i32 %sub, 0
 %retval = select i1 %cmp, i32 1, i32 0
 ret i32 %retval
}
define i32 @test5() {
entry:
%z = alloca i32
- store i32 0, i32* %z
- %tmp = load i32, i32* %z
+ store i32 0, ptr %z
+ %tmp = load i32, ptr %z
%sub = sub i32 %tmp, 1
%cmp = icmp sge i32 %sub, 0
 %retval = select i1 %cmp, i32 0, i32 1
 ret i32 %retval
}
define i32 @test6() {
entry:
%z = alloca i32
- store i32 0, i32* %z
- %tmp = load i32, i32* %z
+ store i32 0, ptr %z
+ %tmp = load i32, ptr %z
%sub = sub i32 %tmp, 1
%cmp = icmp sgt i32 %sub, 0
 %retval = select i1 %cmp, i32 0, i32 1
 ret i32 %retval
}
define i32 @test7() {
entry:
%z = alloca i32
- store i32 0, i32* %z
- %tmp = load i32, i32* %z
+ store i32 0, ptr %z
+ %tmp = load i32, ptr %z
%sub = sub i32 %tmp, 1
%cmp = icmp eq i32 %sub, 0
 %retval = select i1 %cmp, i32 0, i32 1
 ret i32 %retval
}
define i32 @test8() {
entry:
%z = alloca i32
- store i32 0, i32* %z
- %tmp = load i32, i32* %z
+ store i32 0, ptr %z
+ %tmp = load i32, ptr %z
%sub = sub i32 %tmp, 1
%cmp = icmp ne i32 %sub, 0
 %retval = select i1 %cmp, i32 1, i32 0
 ret i32 %retval
}
define void @a() {
 ret void
}
-define signext i32 @b(i32* inreg %x) {
+define signext i32 @b(ptr inreg %x) {
ret i32 0
}
define void @c(...) {
 ret void
}
-define void @g(i32* %y) {
+define void @g(ptr %y) {
; CHECK-LABEL: @g(
-; CHECK: call i64 bitcast (i32 (i32*)* @b to i64 (i32)*)(i32 0)
- %x = call i64 bitcast (i32 (i32*)* @b to i64 (i32)*)( i32 0 ) ; <i64> [#uses=0]
+; CHECK: call i64 @b(i32 0)
+ %x = call i64 @b( i32 0 ) ; <i64> [#uses=0]
; The rest should not have bitcasts remaining
; CHECK-NOT: bitcast
- call void bitcast (void ()* @a to void (i32*)*)( i32* noalias %y )
- call <2 x i32> bitcast (i32 (i32*)* @b to <2 x i32> (i32*)*)( i32* inreg null ) ; <<2 x i32>>:1 [#uses=0]
- call void bitcast (void (...)* @c to void (i32)*)( i32 0 )
- call void bitcast (void (...)* @c to void (i32)*)( i32 zeroext 0 )
+ call void @a( ptr noalias %y )
+ call <2 x i32> @b( ptr inreg null ) ; <<2 x i32>>:1 [#uses=0]
+ call void @c( i32 0 )
+ call void @c( i32 zeroext 0 )
ret void
}
}
define i32 @g() {
- %x = call i32 bitcast (<2 x i32> ()* @f to i32 ()*)( ) ; <i32> [#uses=1]
+ %x = call i32 @f( ) ; <i32> [#uses=1]
ret i32 %x
}
; RUN: opt < %s -passes=instcombine -S | grep zeroext
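; Because the llvm.init.trampoline is visible, the call through %tramp below can
; become a direct call to @f with %FRAME.0 as the nest argument; the grep checks
; that the zeroext on the i32 0 argument is kept.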
- %struct.FRAME.nest = type { i32, i32 (...)* }
+ %struct.FRAME.nest = type { i32, ptr }
%struct.__builtin_trampoline = type { [10 x i8] }
-declare void @llvm.init.trampoline(i8*, i8*, i8*) nounwind
-declare i8* @llvm.adjust.trampoline(i8*) nounwind
+declare void @llvm.init.trampoline(ptr, ptr, ptr) nounwind
+declare ptr @llvm.adjust.trampoline(ptr) nounwind
-declare i32 @f(%struct.FRAME.nest* nest , ...)
+declare i32 @f(ptr nest , ...)
define i32 @nest(i32 %n) {
entry:
- %FRAME.0 = alloca %struct.FRAME.nest, align 8 ; <%struct.FRAME.nest*> [#uses=3]
- %TRAMP.216 = alloca [10 x i8], align 16 ; <[10 x i8]*> [#uses=1]
- %TRAMP.216.sub = getelementptr [10 x i8], [10 x i8]* %TRAMP.216, i32 0, i32 0 ; <i8*> [#uses=1]
- %tmp3 = getelementptr %struct.FRAME.nest, %struct.FRAME.nest* %FRAME.0, i32 0, i32 0 ; <i32*> [#uses=1]
- store i32 %n, i32* %tmp3, align 8
- %FRAME.06 = bitcast %struct.FRAME.nest* %FRAME.0 to i8* ; <i8*> [#uses=1]
- call void @llvm.init.trampoline( i8* %TRAMP.216.sub, i8* bitcast (i32 (%struct.FRAME.nest*, ...)* @f to i8*), i8* %FRAME.06 ) ; <i8*> [#uses=1]
- %tramp = call i8* @llvm.adjust.trampoline( i8* %TRAMP.216.sub)
- %tmp7 = getelementptr %struct.FRAME.nest, %struct.FRAME.nest* %FRAME.0, i32 0, i32 1 ; <i32 (...)**> [#uses=1]
- %tmp89 = bitcast i8* %tramp to i32 (...)* ; <i32 (...)*> [#uses=2]
- store i32 (...)* %tmp89, i32 (...)** %tmp7, align 8
- %tmp2.i = call i32 (...) %tmp89( i32 zeroext 0 ) ; <i32> [#uses=1]
+ %FRAME.0 = alloca %struct.FRAME.nest, align 8 ; <ptr> [#uses=3]
+ %TRAMP.216 = alloca [10 x i8], align 16 ; <ptr> [#uses=1]
+ %TRAMP.216.sub = getelementptr [10 x i8], ptr %TRAMP.216, i32 0, i32 0 ; <ptr> [#uses=1]
+ %tmp3 = getelementptr %struct.FRAME.nest, ptr %FRAME.0, i32 0, i32 0 ; <ptr> [#uses=1]
+ store i32 %n, ptr %tmp3, align 8
+ call void @llvm.init.trampoline( ptr %TRAMP.216.sub, ptr @f, ptr %FRAME.0 ) ; <ptr> [#uses=1]
+ %tramp = call ptr @llvm.adjust.trampoline( ptr %TRAMP.216.sub)
+ %tmp7 = getelementptr %struct.FRAME.nest, ptr %FRAME.0, i32 0, i32 1 ; <ptr> [#uses=1]
+ store ptr %tramp, ptr %tmp7, align 8
+ %tmp2.i = call i32 (...) %tramp( i32 zeroext 0 ) ; <i32> [#uses=1]
ret i32 %tmp2.i
}
; RUN: opt < %s -passes=instcombine -S | grep "16" | count 1
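; The ptrtoint/add/inttoptr sequence is pointer arithmetic in disguise;
; 'grep "16" | count 1' checks the offset ends up as a single constant (e.g. in a gep).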
-define i8* @bork(i8** %qux) {
- %tmp275 = load i8*, i8** %qux, align 1
- %tmp275276 = ptrtoint i8* %tmp275 to i32
+define ptr @bork(ptr %qux) {
+ %tmp275 = load ptr, ptr %qux, align 1
+ %tmp275276 = ptrtoint ptr %tmp275 to i32
%tmp277 = add i32 %tmp275276, 16
- %tmp277278 = inttoptr i32 %tmp277 to i8*
- ret i8* %tmp277278
+ %tmp277278 = inttoptr i32 %tmp277 to ptr
+ ret ptr %tmp277278
}
; RUN: opt < %s -passes=instcombine -S | grep "store volatile"
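; The alloca is otherwise dead, but the volatile store to it must not be removed.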
define void @test() {
- %votf = alloca <4 x float> ; <<4 x float>*> [#uses=1]
- store volatile <4 x float> zeroinitializer, <4 x float>* %votf, align 16
+ %votf = alloca <4 x float> ; <ptr> [#uses=1]
+ store volatile <4 x float> zeroinitializer, ptr %votf, align 16
ret void
}
; RUN: opt < %s -passes=instcombine -S | grep "load volatile" | count 2
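; Exactly two volatile loads of @g_1 must remain; volatile accesses may not be
; merged or deleted.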
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin8"
-@g_1 = internal global i32 0 ; <i32*> [#uses=3]
+@g_1 = internal global i32 0 ; <ptr> [#uses=3]
define i32 @main() nounwind {
entry:
%tmp93 = icmp slt i32 0, 10 ; <i1> [#uses=0]
- %tmp34 = load volatile i32, i32* @g_1, align 4 ; <i32> [#uses=1]
+ %tmp34 = load volatile i32, ptr @g_1, align 4 ; <i32> [#uses=1]
br label %bb
bb: ; preds = %bb, %entry
%b.0.reg2mem.0 = phi i32 [ 0, %entry ], [ %tmp6, %bb ] ; <i32> [#uses=1]
%tmp3.reg2mem.0 = phi i32 [ %tmp34, %entry ], [ %tmp3, %bb ] ; <i32> [#uses=1]
%tmp4 = add i32 %tmp3.reg2mem.0, 5 ; <i32> [#uses=1]
- store volatile i32 %tmp4, i32* @g_1, align 4
+ store volatile i32 %tmp4, ptr @g_1, align 4
%tmp6 = add i32 %b.0.reg2mem.0, 1 ; <i32> [#uses=2]
%tmp9 = icmp slt i32 %tmp6, 10 ; <i1> [#uses=1]
- %tmp3 = load volatile i32, i32* @g_1, align 4 ; <i32> [#uses=1]
+ %tmp3 = load volatile i32, ptr @g_1, align 4 ; <i32> [#uses=1]
br i1 %tmp9, label %bb, label %bb11
bb11: ; preds = %bb
; PR2262
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin8"
-@g_1 = internal global i32 0 ; <i32*> [#uses=3]
+@g_1 = internal global i32 0 ; <ptr> [#uses=3]
define i32 @main(i32 %i) nounwind {
entry:
%tmp93 = icmp slt i32 %i, 10 ; <i1> [#uses=0]
- %tmp34 = load volatile i32, i32* @g_1, align 4 ; <i32> [#uses=1]
+ %tmp34 = load volatile i32, ptr @g_1, align 4 ; <i32> [#uses=1]
br i1 %tmp93, label %bb11, label %bb
bb: ; preds = %bb, %entry
- %tmp3 = load volatile i32, i32* @g_1, align 4 ; <i32> [#uses=1]
+ %tmp3 = load volatile i32, ptr @g_1, align 4 ; <i32> [#uses=1]
br label %bb11
bb11: ; preds = %bb
define i32 @a() nounwind {
entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp1 = call i8* @malloc( i32 10 ) nounwind ; <i8*> [#uses=5]
- %tmp3 = getelementptr i8, i8* %tmp1, i32 1 ; <i8*> [#uses=1]
- store i8 0, i8* %tmp3, align 1
- %tmp5 = getelementptr i8, i8* %tmp1, i32 0 ; <i8*> [#uses=1]
- store i8 1, i8* %tmp5, align 1
- %tmp7 = call i32 @strlen( i8* %tmp1 ) nounwind readonly ; <i32> [#uses=1]
- %tmp9 = getelementptr i8, i8* %tmp1, i32 0 ; <i8*> [#uses=1]
- store i8 0, i8* %tmp9, align 1
- %tmp11 = call i32 (...) @b( i8* %tmp1 ) nounwind ; <i32> [#uses=0]
+ %tmp1 = call ptr @malloc( i32 10 ) nounwind ; <ptr> [#uses=5]
+ %tmp3 = getelementptr i8, ptr %tmp1, i32 1 ; <ptr> [#uses=1]
+ store i8 0, ptr %tmp3, align 1
+ %tmp5 = getelementptr i8, ptr %tmp1, i32 0 ; <ptr> [#uses=1]
+ store i8 1, ptr %tmp5, align 1
+ %tmp7 = call i32 @strlen( ptr %tmp1 ) nounwind readonly ; <i32> [#uses=1]
+ %tmp9 = getelementptr i8, ptr %tmp1, i32 0 ; <ptr> [#uses=1]
+ store i8 0, ptr %tmp9, align 1
+ %tmp11 = call i32 (...) @b( ptr %tmp1 ) nounwind ; <i32> [#uses=0]
ret i32 %tmp7
}
-declare i8* @malloc(i32) nounwind
+declare ptr @malloc(i32) nounwind
-declare i32 @strlen(i8*) nounwind readonly
+declare i32 @strlen(ptr) nounwind readonly
declare i32 @b(...)
define i32 @a() nounwind {
entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp1 = call i8* @malloc( i32 10 ) nounwind ; <i8*> [#uses=5]
- %tmp3 = getelementptr i8, i8* %tmp1, i32 1 ; <i8*> [#uses=1]
- store i8 0, i8* %tmp3, align 1
- %tmp5 = getelementptr i8, i8* %tmp1, i32 0 ; <i8*> [#uses=1]
- store i8 1, i8* %tmp5, align 1
+ %tmp1 = call ptr @malloc( i32 10 ) nounwind ; <ptr> [#uses=5]
+ %tmp3 = getelementptr i8, ptr %tmp1, i32 1 ; <ptr> [#uses=1]
+ store i8 0, ptr %tmp3, align 1
+ %tmp5 = getelementptr i8, ptr %tmp1, i32 0 ; <ptr> [#uses=1]
+ store i8 1, ptr %tmp5, align 1
; CHECK: store
; CHECK: store
; CHECK-NEXT: strlen
; CHECK-NEXT: store
- %tmp7 = call i32 @strlen( i8* %tmp1 ) nounwind readonly ; <i32> [#uses=1]
- %tmp9 = getelementptr i8, i8* %tmp1, i32 0 ; <i8*> [#uses=1]
- store i8 0, i8* %tmp9, align 1
- %tmp11 = call i32 (...) @b( i8* %tmp1 ) nounwind ; <i32> [#uses=0]
+ %tmp7 = call i32 @strlen( ptr %tmp1 ) nounwind readonly ; <i32> [#uses=1]
+ %tmp9 = getelementptr i8, ptr %tmp1, i32 0 ; <ptr> [#uses=1]
+ store i8 0, ptr %tmp9, align 1
+ %tmp11 = call i32 (...) @b( ptr %tmp1 ) nounwind ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
ret i32 %tmp7
}
-declare i8* @malloc(i32) nounwind
+declare ptr @malloc(i32) nounwind
-declare i32 @strlen(i8*) nounwind readonly
+declare i32 @strlen(ptr) nounwind readonly
declare i32 @b(...)
; RUN: opt < %s -passes=instcombine -disable-output
; PR2303
- %"struct.std::ctype<char>" = type { %"struct.std::locale::facet", i32*, i8, i32*, i32*, i16*, i8, [256 x i8], [256 x i8], i8 }
- %"struct.std::locale::facet" = type { i32 (...)**, i32 }
+ %"struct.std::ctype<char>" = type { %"struct.std::locale::facet", ptr, i8, ptr, ptr, ptr, i8, [256 x i8], [256 x i8], i8 }
+ %"struct.std::locale::facet" = type { ptr, i32 }
-declare i32* @_ZNSt6locale5facet15_S_get_c_localeEv()
+declare ptr @_ZNSt6locale5facet15_S_get_c_localeEv()
-declare i32** @__ctype_toupper_loc() readnone
+declare ptr @__ctype_toupper_loc() readnone
-declare i32** @__ctype_tolower_loc() readnone
+declare ptr @__ctype_tolower_loc() readnone
-define void @_ZNSt5ctypeIcEC2EPiPKtbm(%"struct.std::ctype<char>"* %this, i32* %unnamed_arg, i16* %__table, i8 zeroext %__del, i64 %__refs) personality i32 (...)* @__gxx_personality_v0 {
+define void @_ZNSt5ctypeIcEC2EPiPKtbm(ptr %this, ptr %unnamed_arg, ptr %__table, i8 zeroext %__del, i64 %__refs) personality ptr @__gxx_personality_v0 {
entry:
- %tmp8 = invoke i32* @_ZNSt6locale5facet15_S_get_c_localeEv( )
- to label %invcont unwind label %lpad ; <i32*> [#uses=0]
+ %tmp8 = invoke ptr @_ZNSt6locale5facet15_S_get_c_localeEv( )
+ to label %invcont unwind label %lpad ; <ptr> [#uses=0]
invcont: ; preds = %entry
- %tmp32 = invoke i32** @__ctype_toupper_loc( ) readnone
- to label %invcont31 unwind label %lpad ; <i32**> [#uses=0]
+ %tmp32 = invoke ptr @__ctype_toupper_loc( ) readnone
+ to label %invcont31 unwind label %lpad ; <ptr> [#uses=0]
invcont31: ; preds = %invcont
- %tmp38 = invoke i32** @__ctype_tolower_loc( ) readnone
- to label %invcont37 unwind label %lpad ; <i32**> [#uses=1]
+ %tmp38 = invoke ptr @__ctype_tolower_loc( ) readnone
+ to label %invcont37 unwind label %lpad ; <ptr> [#uses=1]
invcont37: ; preds = %invcont31
- %tmp39 = load i32*, i32** %tmp38, align 8 ; <i32*> [#uses=1]
- %tmp41 = getelementptr %"struct.std::ctype<char>", %"struct.std::ctype<char>"* %this, i32 0, i32 4 ; <i32**> [#uses=1]
- store i32* %tmp39, i32** %tmp41, align 8
+ %tmp39 = load ptr, ptr %tmp38, align 8 ; <ptr> [#uses=1]
+ %tmp41 = getelementptr %"struct.std::ctype<char>", ptr %this, i32 0, i32 4 ; <ptr> [#uses=1]
+ store ptr %tmp39, ptr %tmp41, align 8
ret void
lpad: ; preds = %invcont31, %invcont, %entry
- %exn = landingpad {i8*, i32}
+ %exn = landingpad {ptr, i32}
cleanup
unreachable
}
define void @f(i32) {
entry:
- %blockSize100k = alloca i32 ; <i32*> [#uses=2]
- store i32 %0, i32* %blockSize100k
- %n = alloca i32 ; <i32*> [#uses=2]
- load i32, i32* %blockSize100k ; <i32>:1 [#uses=1]
- store i32 %1, i32* %n
- load i32, i32* %n ; <i32>:2 [#uses=1]
+ %blockSize100k = alloca i32 ; <ptr> [#uses=2]
+ store i32 %0, ptr %blockSize100k
+ %n = alloca i32 ; <ptr> [#uses=2]
+ load i32, ptr %blockSize100k ; <i32>:1 [#uses=1]
+ store i32 %1, ptr %n
+ load i32, ptr %n ; <i32>:2 [#uses=1]
add i32 %2, 2 ; <i32>:3 [#uses=1]
- mul i32 %3, ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i32) ; <i32>:4 [#uses=1]
+ mul i32 %3, ptrtoint (ptr getelementptr (i32, ptr null, i32 1) to i32) ; <i32>:4 [#uses=1]
call void @BZALLOC( i32 %4 )
br label %return
target triple = "i386-pc-linux-gnu"
define i1 @f1() {
- ret i1 icmp eq (i8* inttoptr (i32 1 to i8*), i8* inttoptr (i32 2 to i8*))
+ ret i1 icmp eq (ptr inttoptr (i32 1 to ptr), ptr inttoptr (i32 2 to ptr))
}
define i1 @f2() {
- ret i1 icmp eq (i8* inttoptr (i16 1 to i8*), i8* inttoptr (i16 2 to i8*))
+ ret i1 icmp eq (ptr inttoptr (i16 1 to ptr), ptr inttoptr (i16 2 to ptr))
}
; CHECK-LABEL: @f(
; CHECK: ret i1 false
-define i1 @f(i8* %x) {
+define i1 @f(ptr %x) {
entry:
- %tmp462 = load i8, i8* %x, align 1 ; <i8> [#uses=1]
+ %tmp462 = load i8, ptr %x, align 1 ; <i8> [#uses=1]
%tmp462463 = sitofp i8 %tmp462 to float ; <float> [#uses=1]
%tmp464 = fcmp ugt float %tmp462463, 0x47EFFFFFE0000000 ; <i1>
ret i1 %tmp464
; RUN: opt < %s -passes=instcombine -S | grep "store i32" | count 2
-@g_139 = global i32 0 ; <i32*> [#uses=2]
+@g_139 = global i32 0 ; <ptr> [#uses=2]
define void @func_56(i32 %p_60) nounwind {
entry:
- store i32 1, i32* @g_139, align 4
+ store i32 1, ptr @g_139, align 4
%tmp1 = icmp ne i32 %p_60, 0 ; <i1> [#uses=1]
%tmp12 = zext i1 %tmp1 to i8 ; <i8> [#uses=1]
%toBool = icmp ne i8 %tmp12, 0 ; <i1> [#uses=1]
br i1 %toBool, label %bb, label %return
bb: ; preds = %bb, %entry
- store i32 1, i32* @g_139, align 4
+ store i32 1, ptr @g_139, align 4
br label %bb
return: ; preds = %entry
; RUN: opt < %s -passes=instcombine -S | grep "store i8" | count 2
-define i32 @a(i8* %s) nounwind {
+define i32 @a(ptr %s) nounwind {
entry:
- store i8 0, i8* %s, align 1 ; This store cannot be eliminated!
- %tmp3 = call i32 @strlen( i8* %s ) nounwind readonly
+ store i8 0, ptr %s, align 1 ; This store cannot be eliminated!
+ %tmp3 = call i32 @strlen( ptr %s ) nounwind readonly
%tmp5 = icmp ne i32 %tmp3, 0
br i1 %tmp5, label %bb, label %bb8
bb: ; preds = %entry
- store i8 0, i8* %s, align 1
+ store i8 0, ptr %s, align 1
br label %bb8
bb8:
ret i32 %tmp3
}
-declare i32 @strlen(i8*) nounwind readonly
+declare i32 @strlen(ptr) nounwind readonly
; RUN: opt < %s -passes=instcombine -S | grep load | count 3
; PR2471
-declare i32 @x(i32*)
-define i32 @b(i32* %a, i32* %b) {
+declare i32 @x(ptr)
+define i32 @b(ptr %a, ptr %b) {
entry:
- %tmp1 = load i32, i32* %a
- %tmp3 = load i32, i32* %b
+ %tmp1 = load i32, ptr %a
+ %tmp3 = load i32, ptr %b
%add = add i32 %tmp1, %tmp3
- %call = call i32 @x( i32* %a )
+ %call = call i32 @x( ptr %a )
%tobool = icmp ne i32 %add, 0
; not safe to turn into an uncond load
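; (the call to @x above may write memory, so %tmp1/%tmp3 cannot stand in for a
; load of %cond)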
- %cond = select i1 %tobool, i32* %b, i32* %a
- %tmp8 = load i32, i32* %cond
+ %cond = select i1 %tobool, ptr %b, ptr %a
+ %tmp8 = load i32, ptr %cond
ret i32 %tmp8
}
; PR2488
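; Stack save/restore pairs guard a dynamic alloca in the loop below; instcombine
; must keep them and the volatile stores to @p.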
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
target triple = "i386-pc-linux-gnu"
-@p = weak global i8* null ; <i8**> [#uses=2]
+@p = weak global ptr null ; <ptr> [#uses=2]
define i32 @main() nounwind {
entry:
- %tmp248 = call i8* @llvm.stacksave( ) ; <i8*> [#uses=1]
- %tmp2752 = alloca i32 ; <i32*> [#uses=2]
- %tmpcast53 = bitcast i32* %tmp2752 to i8* ; <i8*> [#uses=1]
- store i32 2, i32* %tmp2752, align 4
- store volatile i8* %tmpcast53, i8** @p, align 4
+ %tmp248 = call ptr @llvm.stacksave( ) ; <ptr> [#uses=1]
+ %tmp2752 = alloca i32 ; <ptr> [#uses=2]
+ store i32 2, ptr %tmp2752, align 4
+ store volatile ptr %tmp2752, ptr @p, align 4
br label %bb44
bb: ; preds = %bb44
bb44: ; preds = %bb44, %entry
%indvar = phi i32 [ 0, %entry ], [ %tmp3857, %bb44 ] ; <i32> [#uses=1]
- %tmp249 = phi i8* [ %tmp248, %entry ], [ %tmp2, %bb44 ] ; <i8*> [#uses=1]
+ %tmp249 = phi ptr [ %tmp248, %entry ], [ %tmp2, %bb44 ] ; <ptr> [#uses=1]
%tmp3857 = add i32 %indvar, 1 ; <i32> [#uses=3]
- call void @llvm.stackrestore( i8* %tmp249 )
- %tmp2 = call i8* @llvm.stacksave( ) ; <i8*> [#uses=1]
+ call void @llvm.stackrestore( ptr %tmp249 )
+ %tmp2 = call ptr @llvm.stacksave( ) ; <ptr> [#uses=1]
%tmp4 = srem i32 %tmp3857, 1000 ; <i32> [#uses=2]
%tmp5 = add i32 %tmp4, 1 ; <i32> [#uses=1]
- %tmp27 = alloca i32, i32 %tmp5 ; <i32*> [#uses=3]
- %tmpcast = bitcast i32* %tmp27 to i8* ; <i8*> [#uses=1]
- store i32 1, i32* %tmp27, align 4
- %tmp34 = getelementptr i32, i32* %tmp27, i32 %tmp4 ; <i32*> [#uses=1]
- store i32 2, i32* %tmp34, align 4
- store volatile i8* %tmpcast, i8** @p, align 4
+ %tmp27 = alloca i32, i32 %tmp5 ; <ptr> [#uses=3]
+ store i32 1, ptr %tmp27, align 4
+ %tmp34 = getelementptr i32, ptr %tmp27, i32 %tmp4 ; <ptr> [#uses=1]
+ store i32 2, ptr %tmp34, align 4
+ store volatile ptr %tmp27, ptr @p, align 4
%exitcond = icmp eq i32 %tmp3857, 999999 ; <i1> [#uses=1]
br i1 %exitcond, label %bb, label %bb44
}
-declare i8* @llvm.stacksave() nounwind
+declare ptr @llvm.stacksave() nounwind
-declare void @llvm.stackrestore(i8*) nounwind
+declare void @llvm.stackrestore(ptr) nounwind
; PR2496
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin8"
-@g_1 = internal global i32 0 ; <i32*> [#uses=3]
+@g_1 = internal global i32 0 ; <ptr> [#uses=3]
define i32 @main() nounwind {
entry:
%tmp93 = icmp slt i32 0, 10 ; <i1> [#uses=0]
- %tmp34 = load volatile i32, i32* @g_1, align 4 ; <i32> [#uses=1]
+ %tmp34 = load volatile i32, ptr @g_1, align 4 ; <i32> [#uses=1]
br label %bb
bb: ; preds = %bb, %entry
%b.0.reg2mem.0 = phi i32 [ 0, %entry ], [ %tmp6, %bb ] ; <i32> [#uses=1]
%tmp3.reg2mem.0 = phi i32 [ %tmp3, %bb ], [ %tmp34, %entry ]
%tmp4 = add i32 %tmp3.reg2mem.0, 5 ; <i32> [#uses=1]
- store volatile i32 %tmp4, i32* @g_1, align 4
+ store volatile i32 %tmp4, ptr @g_1, align 4
%tmp6 = add i32 %b.0.reg2mem.0, 1 ; <i32> [#uses=2]
%tmp9 = icmp slt i32 %tmp6, 10 ; <i1> [#uses=1]
- %tmp3 = load volatile i32, i32* @g_1, align 4 ; <i32> [#uses=1]
+ %tmp3 = load volatile i32, ptr @g_1, align 4 ; <i32> [#uses=1]
br i1 %tmp9, label %bb, label %bb11
bb11: ; preds = %bb
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
; PR2629
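; The sub+icmp ugt range checks below canonicalize to add+icmp ult form, as
; the CHECK lines show (e.g. x - 6 >u 2 becomes (x - 9) <u -3).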
-define void @f(i8* %x) nounwind {
+define void @f(ptr %x) nounwind {
; CHECK-LABEL: @f(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[BB:%.*]]
; CHECK: bb:
-; CHECK-NEXT: [[L1:%.*]] = load i8, i8* [[X:%.*]], align 1
+; CHECK-NEXT: [[L1:%.*]] = load i8, ptr [[X:%.*]], align 1
; CHECK-NEXT: [[TMP0:%.*]] = add i8 [[L1]], -9
; CHECK-NEXT: [[C1:%.*]] = icmp ult i8 [[TMP0]], -3
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[L1]], -13
br label %bb
bb:
- %g1 = getelementptr i8, i8* %x, i32 0
- %l1 = load i8, i8* %g1, align 1
+ %l1 = load i8, ptr %x, align 1
%s1 = sub i8 %l1, 6
%c1 = icmp ugt i8 %s1, 2
%s2 = sub i8 %l1, 10
ret void
}
-define void @f_logical(i8* %x) nounwind {
+define void @f_logical(ptr %x) nounwind {
; CHECK-LABEL: @f_logical(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[BB:%.*]]
; CHECK: bb:
-; CHECK-NEXT: [[L1:%.*]] = load i8, i8* [[X:%.*]], align 1
+; CHECK-NEXT: [[L1:%.*]] = load i8, ptr [[X:%.*]], align 1
; CHECK-NEXT: [[TMP0:%.*]] = add i8 [[L1]], -9
; CHECK-NEXT: [[C1:%.*]] = icmp ult i8 [[TMP0]], -3
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[L1]], -13
br label %bb
bb:
- %g1 = getelementptr i8, i8* %x, i32 0
- %l1 = load i8, i8* %g1, align 1
+ %l1 = load i8, ptr %x, align 1
%s1 = sub i8 %l1, 6
%c1 = icmp ugt i8 %s1, 2
%s2 = sub i8 %l1, 10
%11 = extractelement <2 x i32> %10, i32 1 ; <i32> [#uses=1]
%12 = insertelement <4 x i32> zeroinitializer, i32 %11, i32 3 ; <<4 x i32>> [#uses=1]
%13 = sitofp <4 x i32> %12 to <4 x float> ; <<4 x float>> [#uses=1]
- store <4 x float> %13, <4 x float>* null
+ store <4 x float> %13, ptr null
br label %4
}
; PR2940
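; Presumably checks the inttoptr/ptrtoint round-trip: with pointers and i32
; the same width, this can fold to ret i32 1.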
define i32 @tstid() {
- %var0 = inttoptr i32 1 to i8* ; <i8*> [#uses=1]
- %var2 = ptrtoint i8* %var0 to i32 ; <i32> [#uses=1]
+ %var0 = inttoptr i32 1 to ptr ; <ptr> [#uses=1]
+ %var2 = ptrtoint ptr %var0 to i32 ; <i32> [#uses=1]
ret i32 %var2
}
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin9.6"
-define i32 @test(i32* %P) nounwind {
+define i32 @test(ptr %P) nounwind {
entry:
- %Q = addrspacecast i32* %P to i32 addrspace(1)*
- store i32 0, i32 addrspace(1)* %Q, align 4
+ %Q = addrspacecast ptr %P to ptr addrspace(1)
+ store i32 0, ptr addrspace(1) %Q, align 4
ret i32 0
}
; ModuleID = 'apf.c'
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin9.6"
-@"\01LC" = internal constant [4 x i8] c"%f\0A\00" ; <[4 x i8]*> [#uses=1]
+@"\01LC" = internal constant [4 x i8] c"%f\0A\00" ; <ptr> [#uses=1]
define void @foo1() nounwind {
entry:
- %y = alloca float ; <float*> [#uses=2]
- %x = alloca float ; <float*> [#uses=2]
+ %y = alloca float ; <ptr> [#uses=2]
+ %x = alloca float ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store float 0x7FF0000000000000, float* %x, align 4
- store float 0x7FF8000000000000, float* %y, align 4
- %0 = load float, float* %y, align 4 ; <float> [#uses=1]
+ store float 0x7FF0000000000000, ptr %x, align 4
+ store float 0x7FF8000000000000, ptr %y, align 4
+ %0 = load float, ptr %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float, float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, ptr %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
- %5 = call i32 (i8*, ...) @printf(i8* getelementptr ([4 x i8], [4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ %5 = call i32 (ptr, ...) @printf(ptr @"\01LC", double %4) nounwind ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
-declare i32 @printf(i8*, ...) nounwind
+declare i32 @printf(ptr, ...) nounwind
define void @foo2() nounwind {
entry:
- %y = alloca float ; <float*> [#uses=2]
- %x = alloca float ; <float*> [#uses=2]
+ %y = alloca float ; <ptr> [#uses=2]
+ %x = alloca float ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store float 0x7FF0000000000000, float* %x, align 4
- store float 0.000000e+00, float* %y, align 4
- %0 = load float, float* %y, align 4 ; <float> [#uses=1]
+ store float 0x7FF0000000000000, ptr %x, align 4
+ store float 0.000000e+00, ptr %y, align 4
+ %0 = load float, ptr %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float, float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, ptr %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
- %5 = call i32 (i8*, ...) @printf(i8* getelementptr ([4 x i8], [4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ %5 = call i32 (ptr, ...) @printf(ptr @"\01LC", double %4) nounwind ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
define void @foo3() nounwind {
entry:
- %y = alloca float ; <float*> [#uses=2]
- %x = alloca float ; <float*> [#uses=2]
+ %y = alloca float ; <ptr> [#uses=2]
+ %x = alloca float ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store float 0x7FF0000000000000, float* %x, align 4
- store float 3.500000e+00, float* %y, align 4
- %0 = load float, float* %y, align 4 ; <float> [#uses=1]
+ store float 0x7FF0000000000000, ptr %x, align 4
+ store float 3.500000e+00, ptr %y, align 4
+ %0 = load float, ptr %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float, float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, ptr %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
- %5 = call i32 (i8*, ...) @printf(i8* getelementptr ([4 x i8], [4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ %5 = call i32 (ptr, ...) @printf(ptr @"\01LC", double %4) nounwind ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
define void @foo4() nounwind {
entry:
- %y = alloca float ; <float*> [#uses=2]
- %x = alloca float ; <float*> [#uses=2]
+ %y = alloca float ; <ptr> [#uses=2]
+ %x = alloca float ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store float 0x7FF0000000000000, float* %x, align 4
- store float 0x7FF0000000000000, float* %y, align 4
- %0 = load float, float* %y, align 4 ; <float> [#uses=1]
+ store float 0x7FF0000000000000, ptr %x, align 4
+ store float 0x7FF0000000000000, ptr %y, align 4
+ %0 = load float, ptr %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float, float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, ptr %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
- %5 = call i32 (i8*, ...) @printf(i8* getelementptr ([4 x i8], [4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ %5 = call i32 (ptr, ...) @printf(ptr @"\01LC", double %4) nounwind ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
define void @foo5() nounwind {
entry:
- %y = alloca float ; <float*> [#uses=2]
- %x = alloca float ; <float*> [#uses=2]
+ %y = alloca float ; <ptr> [#uses=2]
+ %x = alloca float ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store float 0x7FF8000000000000, float* %x, align 4
- store float 0x7FF0000000000000, float* %y, align 4
- %0 = load float, float* %y, align 4 ; <float> [#uses=1]
+ store float 0x7FF8000000000000, ptr %x, align 4
+ store float 0x7FF0000000000000, ptr %y, align 4
+ %0 = load float, ptr %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float, float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, ptr %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
- %5 = call i32 (i8*, ...) @printf(i8* getelementptr ([4 x i8], [4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ %5 = call i32 (ptr, ...) @printf(ptr @"\01LC", double %4) nounwind ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
define void @foo6() nounwind {
entry:
- %y = alloca float ; <float*> [#uses=2]
- %x = alloca float ; <float*> [#uses=2]
+ %y = alloca float ; <ptr> [#uses=2]
+ %x = alloca float ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store float 0x7FF8000000000000, float* %x, align 4
- store float 0.000000e+00, float* %y, align 4
- %0 = load float, float* %y, align 4 ; <float> [#uses=1]
+ store float 0x7FF8000000000000, ptr %x, align 4
+ store float 0.000000e+00, ptr %y, align 4
+ %0 = load float, ptr %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float, float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, ptr %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
- %5 = call i32 (i8*, ...) @printf(i8* getelementptr ([4 x i8], [4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ %5 = call i32 (ptr, ...) @printf(ptr @"\01LC", double %4) nounwind ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
define void @foo7() nounwind {
entry:
- %y = alloca float ; <float*> [#uses=2]
- %x = alloca float ; <float*> [#uses=2]
+ %y = alloca float ; <ptr> [#uses=2]
+ %x = alloca float ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store float 0x7FF8000000000000, float* %x, align 4
- store float 3.500000e+00, float* %y, align 4
- %0 = load float, float* %y, align 4 ; <float> [#uses=1]
+ store float 0x7FF8000000000000, ptr %x, align 4
+ store float 3.500000e+00, ptr %y, align 4
+ %0 = load float, ptr %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float, float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, ptr %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
- %5 = call i32 (i8*, ...) @printf(i8* getelementptr ([4 x i8], [4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ %5 = call i32 (ptr, ...) @printf(ptr @"\01LC", double %4) nounwind ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
define void @foo8() nounwind {
entry:
- %y = alloca float ; <float*> [#uses=2]
- %x = alloca float ; <float*> [#uses=2]
+ %y = alloca float ; <ptr> [#uses=2]
+ %x = alloca float ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store float 0x7FF8000000000000, float* %x, align 4
- store float 0x7FF8000000000000, float* %y, align 4
- %0 = load float, float* %y, align 4 ; <float> [#uses=1]
+ store float 0x7FF8000000000000, ptr %x, align 4
+ store float 0x7FF8000000000000, ptr %y, align 4
+ %0 = load float, ptr %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float, float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, ptr %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
- %5 = call i32 (i8*, ...) @printf(i8* getelementptr ([4 x i8], [4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ %5 = call i32 (ptr, ...) @printf(ptr @"\01LC", double %4) nounwind ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
define void @foo9() nounwind {
entry:
- %y = alloca float ; <float*> [#uses=2]
- %x = alloca float ; <float*> [#uses=2]
+ %y = alloca float ; <ptr> [#uses=2]
+ %x = alloca float ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store float 0.000000e+00, float* %x, align 4
- store float 0x7FF8000000000000, float* %y, align 4
- %0 = load float, float* %y, align 4 ; <float> [#uses=1]
+ store float 0.000000e+00, ptr %x, align 4
+ store float 0x7FF8000000000000, ptr %y, align 4
+ %0 = load float, ptr %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float, float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, ptr %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
- %5 = call i32 (i8*, ...) @printf(i8* getelementptr ([4 x i8], [4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ %5 = call i32 (ptr, ...) @printf(ptr @"\01LC", double %4) nounwind ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
define void @foo10() nounwind {
entry:
- %y = alloca float ; <float*> [#uses=2]
- %x = alloca float ; <float*> [#uses=2]
+ %y = alloca float ; <ptr> [#uses=2]
+ %x = alloca float ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store float 0.000000e+00, float* %x, align 4
- store float 0x7FF0000000000000, float* %y, align 4
- %0 = load float, float* %y, align 4 ; <float> [#uses=1]
+ store float 0.000000e+00, ptr %x, align 4
+ store float 0x7FF0000000000000, ptr %y, align 4
+ %0 = load float, ptr %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float, float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, ptr %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
- %5 = call i32 (i8*, ...) @printf(i8* getelementptr ([4 x i8], [4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ %5 = call i32 (ptr, ...) @printf(ptr @"\01LC", double %4) nounwind ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
define void @foo11() nounwind {
entry:
- %y = alloca float ; <float*> [#uses=2]
- %x = alloca float ; <float*> [#uses=2]
+ %y = alloca float ; <ptr> [#uses=2]
+ %x = alloca float ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store float 0.000000e+00, float* %x, align 4
- store float 0.000000e+00, float* %y, align 4
- %0 = load float, float* %y, align 4 ; <float> [#uses=1]
+ store float 0.000000e+00, ptr %x, align 4
+ store float 0.000000e+00, ptr %y, align 4
+ %0 = load float, ptr %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float, float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, ptr %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
- %5 = call i32 (i8*, ...) @printf(i8* getelementptr ([4 x i8], [4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ %5 = call i32 (ptr, ...) @printf(ptr @"\01LC", double %4) nounwind ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
define void @foo12() nounwind {
entry:
- %y = alloca float ; <float*> [#uses=2]
- %x = alloca float ; <float*> [#uses=2]
+ %y = alloca float ; <ptr> [#uses=2]
+ %x = alloca float ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store float 0.000000e+00, float* %x, align 4
- store float 3.500000e+00, float* %y, align 4
- %0 = load float, float* %y, align 4 ; <float> [#uses=1]
+ store float 0.000000e+00, ptr %x, align 4
+ store float 3.500000e+00, ptr %y, align 4
+ %0 = load float, ptr %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float, float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, ptr %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
- %5 = call i32 (i8*, ...) @printf(i8* getelementptr ([4 x i8], [4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ %5 = call i32 (ptr, ...) @printf(ptr @"\01LC", double %4) nounwind ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
define void @foo13() nounwind {
entry:
- %y = alloca float ; <float*> [#uses=2]
- %x = alloca float ; <float*> [#uses=2]
+ %y = alloca float ; <ptr> [#uses=2]
+ %x = alloca float ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store float 3.500000e+00, float* %x, align 4
- store float 0x7FF8000000000000, float* %y, align 4
- %0 = load float, float* %y, align 4 ; <float> [#uses=1]
+ store float 3.500000e+00, ptr %x, align 4
+ store float 0x7FF8000000000000, ptr %y, align 4
+ %0 = load float, ptr %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float, float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, ptr %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
- %5 = call i32 (i8*, ...) @printf(i8* getelementptr ([4 x i8], [4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ %5 = call i32 (ptr, ...) @printf(ptr @"\01LC", double %4) nounwind ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
define void @foo14() nounwind {
entry:
- %y = alloca float ; <float*> [#uses=2]
- %x = alloca float ; <float*> [#uses=2]
+ %y = alloca float ; <ptr> [#uses=2]
+ %x = alloca float ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store float 3.500000e+00, float* %x, align 4
- store float 0x7FF0000000000000, float* %y, align 4
- %0 = load float, float* %y, align 4 ; <float> [#uses=1]
+ store float 3.500000e+00, ptr %x, align 4
+ store float 0x7FF0000000000000, ptr %y, align 4
+ %0 = load float, ptr %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float, float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, ptr %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
- %5 = call i32 (i8*, ...) @printf(i8* getelementptr ([4 x i8], [4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ %5 = call i32 (ptr, ...) @printf(ptr @"\01LC", double %4) nounwind ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
define void @foo15() nounwind {
entry:
- %y = alloca float ; <float*> [#uses=2]
- %x = alloca float ; <float*> [#uses=2]
+ %y = alloca float ; <ptr> [#uses=2]
+ %x = alloca float ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store float 3.500000e+00, float* %x, align 4
- store float 0.000000e+00, float* %y, align 4
- %0 = load float, float* %y, align 4 ; <float> [#uses=1]
+ store float 3.500000e+00, ptr %x, align 4
+ store float 0.000000e+00, ptr %y, align 4
+ %0 = load float, ptr %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float, float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, ptr %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
- %5 = call i32 (i8*, ...) @printf(i8* getelementptr ([4 x i8], [4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ %5 = call i32 (ptr, ...) @printf(ptr @"\01LC", double %4) nounwind ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
define void @foo16() nounwind {
entry:
- %y = alloca float ; <float*> [#uses=2]
- %x = alloca float ; <float*> [#uses=2]
+ %y = alloca float ; <ptr> [#uses=2]
+ %x = alloca float ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store float 3.500000e+00, float* %x, align 4
- store float 3.500000e+00, float* %y, align 4
- %0 = load float, float* %y, align 4 ; <float> [#uses=1]
+ store float 3.500000e+00, ptr %x, align 4
+ store float 3.500000e+00, ptr %y, align 4
+ %0 = load float, ptr %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float, float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, ptr %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
- %5 = call i32 (i8*, ...) @printf(i8* getelementptr ([4 x i8], [4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ %5 = call i32 (ptr, ...) @printf(ptr @"\01LC", double %4) nounwind ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
define float @test1() nounwind {
entry:
- %retval = alloca float ; <float*> [#uses=2]
- %0 = alloca float ; <float*> [#uses=2]
+ %retval = alloca float ; <ptr> [#uses=2]
+ %0 = alloca float ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%1 = frem double 1.000000e-01, 1.000000e+00 ; <double> [#uses=1]
%2 = fptrunc double %1 to float ; <float> [#uses=1]
- store float %2, float* %0, align 4
- %3 = load float, float* %0, align 4 ; <float> [#uses=1]
- store float %3, float* %retval, align 4
+ store float %2, ptr %0, align 4
+ %3 = load float, ptr %0, align 4 ; <float> [#uses=1]
+ store float %3, ptr %retval, align 4
br label %return
return: ; preds = %entry
- %retval1 = load float, float* %retval ; <float> [#uses=1]
+ %retval1 = load float, ptr %retval ; <float> [#uses=1]
ret float %retval1
}
define float @test2() nounwind {
entry:
- %retval = alloca float ; <float*> [#uses=2]
- %0 = alloca float ; <float*> [#uses=2]
+ %retval = alloca float ; <ptr> [#uses=2]
+ %0 = alloca float ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%1 = frem double -1.000000e-01, 1.000000e+00 ; <double> [#uses=1]
%2 = fptrunc double %1 to float ; <float> [#uses=1]
- store float %2, float* %0, align 4
- %3 = load float, float* %0, align 4 ; <float> [#uses=1]
- store float %3, float* %retval, align 4
+ store float %2, ptr %0, align 4
+ %3 = load float, ptr %0, align 4 ; <float> [#uses=1]
+ store float %3, ptr %retval, align 4
br label %return
return: ; preds = %entry
- %retval1 = load float, float* %retval ; <float> [#uses=1]
+ %retval1 = load float, ptr %retval ; <float> [#uses=1]
ret float %retval1
}
define float @test3() nounwind {
entry:
- %retval = alloca float ; <float*> [#uses=2]
- %0 = alloca float ; <float*> [#uses=2]
+ %retval = alloca float ; <ptr> [#uses=2]
+ %0 = alloca float ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%1 = frem double 1.000000e-01, -1.000000e+00 ; <double> [#uses=1]
%2 = fptrunc double %1 to float ; <float> [#uses=1]
- store float %2, float* %0, align 4
- %3 = load float, float* %0, align 4 ; <float> [#uses=1]
- store float %3, float* %retval, align 4
+ store float %2, ptr %0, align 4
+ %3 = load float, ptr %0, align 4 ; <float> [#uses=1]
+ store float %3, ptr %retval, align 4
br label %return
return: ; preds = %entry
- %retval1 = load float, float* %retval ; <float> [#uses=1]
+ %retval1 = load float, ptr %retval ; <float> [#uses=1]
ret float %retval1
}
define float @test4() nounwind {
entry:
- %retval = alloca float ; <float*> [#uses=2]
- %0 = alloca float ; <float*> [#uses=2]
+ %retval = alloca float ; <ptr> [#uses=2]
+ %0 = alloca float ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%1 = frem double -1.000000e-01, -1.000000e+00 ; <double> [#uses=1]
%2 = fptrunc double %1 to float ; <float> [#uses=1]
- store float %2, float* %0, align 4
- %3 = load float, float* %0, align 4 ; <float> [#uses=1]
- store float %3, float* %retval, align 4
+ store float %2, ptr %0, align 4
+ %3 = load float, ptr %0, align 4 ; <float> [#uses=1]
+ store float %3, ptr %retval, align 4
br label %return
return: ; preds = %entry
- %retval1 = load float, float* %retval ; <float> [#uses=1]
+ %retval1 = load float, ptr %retval ; <float> [#uses=1]
ret float %retval1
}
target triple = "x86_64-unknown-linux-gnu"
%struct.atomic_t = type { i32 }
%struct.inode = type { i32, %struct.mutex }
- %struct.list_head = type { %struct.list_head*, %struct.list_head* }
+ %struct.list_head = type { ptr, ptr }
%struct.lock_class_key = type { }
%struct.mutex = type { %struct.atomic_t, %struct.rwlock_t, %struct.list_head }
%struct.rwlock_t = type { %struct.lock_class_key }
-define void @handle_event(%struct.inode* %bar) nounwind {
+define void @handle_event(ptr %bar) nounwind {
entry:
- %0 = getelementptr %struct.inode, %struct.inode* %bar, i64 -1, i32 1, i32 1 ; <%struct.rwlock_t*> [#uses=1]
- %1 = bitcast %struct.rwlock_t* %0 to i32* ; <i32*> [#uses=1]
- store i32 1, i32* %1, align 4
+ %0 = getelementptr %struct.inode, ptr %bar, i64 -1, i32 1, i32 1 ; <ptr> [#uses=1]
+ store i32 1, ptr %0, align 4
ret void
}
; RUN: opt < %s -passes=instcombine | llvm-dis
; PR3452
-define i128 @test(i64 %A, i64 %B, i1 %C, i128 %Z, i128 %Y, i64* %P, i64* %Q) {
+define i128 @test(i64 %A, i64 %B, i1 %C, i128 %Z, i128 %Y, ptr %P, ptr %Q) {
entry:
%tmp2 = trunc i128 %Z to i64
%tmp4 = trunc i128 %Y to i64
- store i64 %tmp2, i64* %P
- store i64 %tmp4, i64* %Q
+ store i64 %tmp2, ptr %P
+ store i64 %tmp4, ptr %Q
%x = sub i64 %tmp2, %tmp4
%c = sub i64 %tmp2, %tmp4
%tmp137 = zext i1 %C to i64
; Check that nocapture attributes are added when run after an SCC pass.
; PR3520
-define i32 @use(i8* %x) nounwind readonly {
-; CHECK: @use(i8* nocapture %x)
- %1 = tail call i64 @strlen(i8* %x) nounwind readonly
+define i32 @use(ptr %x) nounwind readonly {
+; CHECK: @use(ptr nocapture %x)
+ %1 = tail call i64 @strlen(ptr %x) nounwind readonly
%2 = trunc i64 %1 to i32
ret i32 %2
}
-declare i64 @strlen(i8*) nounwind readonly
-; CHECK: declare i64 @strlen(i8* nocapture) nounwind readonly
+declare i64 @strlen(ptr) nounwind readonly
+; CHECK: declare i64 @strlen(ptr nocapture) nounwind readonly
@.str1 = internal constant [4 x i8] c"\B5%8\00"
define i32 @test() {
- %rhsv = load i32, i32* bitcast ([4 x i8]* @.str1 to i32*), align 1
+ %rhsv = load i32, ptr @.str1, align 1
ret i32 %rhsv
}
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
target triple = "i386-pc-linux-gnu"
-define void @_ada_c32001b(i32 %tmp5, i32* %src) {
+define void @_ada_c32001b(i32 %tmp5, ptr %src) {
entry:
%max289 = select i1 false, i32 %tmp5, i32 0 ; <i32> [#uses=1]
%tmp6 = mul i32 %max289, 4 ; <i32> [#uses=1]
- %tmp7 = alloca i8, i32 0 ; <i8*> [#uses=1]
- %tmp8 = bitcast i8* %tmp7 to [0 x [0 x i32]]* ; <[0 x [0 x i32]]*> [#uses=1]
- %tmp11 = load i32, i32* %src, align 1 ; <i32> [#uses=1]
+ %tmp7 = alloca i8, i32 0 ; <ptr> [#uses=1]
+ %tmp11 = load i32, ptr %src, align 1 ; <i32> [#uses=1]
%tmp12 = icmp eq i32 %tmp11, 3 ; <i1> [#uses=1]
%tmp13 = zext i1 %tmp12 to i8 ; <i8> [#uses=1]
%tmp14 = ashr i32 %tmp6, 2 ; <i32> [#uses=1]
- %tmp15 = bitcast [0 x [0 x i32]]* %tmp8 to i8* ; <i8*> [#uses=1]
%tmp16 = mul i32 %tmp14, 4 ; <i32> [#uses=1]
%tmp17 = mul i32 1, %tmp16 ; <i32> [#uses=1]
- %tmp18 = getelementptr i8, i8* %tmp15, i32 %tmp17 ; <i8*> [#uses=1]
- %tmp19 = bitcast i8* %tmp18 to [0 x i32]* ; <[0 x i32]*> [#uses=1]
- %tmp20 = bitcast [0 x i32]* %tmp19 to i32* ; <i32*> [#uses=1]
- %tmp21 = getelementptr i32, i32* %tmp20, i32 0 ; <i32*> [#uses=1]
- %tmp22 = load i32, i32* %tmp21, align 1 ; <i32> [#uses=1]
+ %tmp18 = getelementptr i8, ptr %tmp7, i32 %tmp17 ; <ptr> [#uses=1]
+ %tmp21 = getelementptr i32, ptr %tmp18, i32 0 ; <ptr> [#uses=1]
+ %tmp22 = load i32, ptr %tmp21, align 1 ; <i32> [#uses=1]
%tmp23 = icmp eq i32 %tmp22, 4 ; <i1> [#uses=1]
%tmp24 = zext i1 %tmp23 to i8 ; <i8> [#uses=1]
%toBool709 = icmp ne i8 %tmp13, 0 ; <i1> [#uses=1]
; RUN: opt < %s -passes=instcombine | llvm-dis
; PR3826
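; No FileCheck patterns here: the RUN line above only pipes through llvm-dis,
; so this is a regression test that instcombine produces valid IR without
; crashing on the vector ashr.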
-define void @0(<4 x i16>*, <4 x i16>*) {
- %3 = alloca <4 x i16>* ; <<4 x i16>**> [#uses=1]
- %4 = load <4 x i16>, <4 x i16>* null, align 1 ; <<4 x i16>> [#uses=1]
+define void @0(ptr, ptr) {
+ %3 = alloca ptr ; <ptr> [#uses=1]
+ %4 = load <4 x i16>, ptr null, align 1 ; <<4 x i16>> [#uses=1]
%5 = ashr <4 x i16> %4, <i16 5, i16 5, i16 5, i16 5> ; <<4 x i16>> [#uses=1]
- %6 = load <4 x i16>*, <4 x i16>** %3 ; <<4 x i16>*> [#uses=1]
- store <4 x i16> %5, <4 x i16>* %6, align 1
+ %6 = load ptr, ptr %3 ; <ptr> [#uses=1]
+ store <4 x i16> %5, ptr %6, align 1
ret void
}
; RUN: opt < %s -passes=instcombine -S | not grep cmp
; rdar://6903175
-define i1 @f0(i32 *%a) nounwind {
- %b = load i32, i32* %a, align 4
+define i1 @f0(ptr %a) nounwind {
+ %b = load i32, ptr %a, align 4
%c = uitofp i32 %b to double
%d = fcmp ogt double %c, 0x41EFFFFFFFE00000
ret i1 %d
}
; PR4366
define void @a() {
- store i32 0, i32 addrspace(1)* null
+ store i32 0, ptr addrspace(1) null
ret void
}
@.str254 = internal constant [2 x i8] c".\00"
@.str557 = internal constant [3 x i8] c"::\00"
-define i8* @demangle_qualified(i32 %isfuncname) nounwind {
+define ptr @demangle_qualified(i32 %isfuncname) nounwind {
entry:
%tobool272 = icmp ne i32 %isfuncname, 0
- %cond276 = select i1 %tobool272, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str254, i32 0, i32 0), i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str557, i32 0, i32 0) ; <i8*> [#uses=4]
- %cmp.i504 = icmp eq i8* %cond276, null
- %rval = getelementptr i8, i8* %cond276, i1 %cmp.i504
- ret i8* %rval
+ %cond276 = select i1 %tobool272, ptr @.str254, ptr @.str557 ; <ptr> [#uses=4]
+ %cmp.i504 = icmp eq ptr %cond276, null
+ %rval = getelementptr i8, ptr %cond276, i1 %cmp.i504
+ ret ptr %rval
}
; CHECK: %cond276 = select i1
-; CHECK: ret i8* %cond276
+; CHECK: ret ptr %cond276
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
target triple = "i386-unknown-linux-gnu"
-@g_92 = common global [2 x i32*] zeroinitializer, align 4 ; <[2 x i32*]*> [#uses=1]
-@g_177 = constant i32** bitcast (i8* getelementptr (i8, i8* bitcast ([2 x i32*]* @g_92 to i8*), i64 4) to i32**), align 4 ; <i32***> [#uses=1]
+@g_92 = common global [2 x ptr] zeroinitializer, align 4 ; <ptr> [#uses=1]
+@g_177 = constant ptr getelementptr (i8, ptr @g_92, i64 4), align 4 ; <ptr> [#uses=1]
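; %conv below is a zext of an i1, so it is always 0 or 1, and
; 'icmp sle 0, %conv' should fold to true.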
define i1 @PR6486() nounwind {
; CHECK-LABEL: @PR6486(
- %tmp = load i32**, i32*** @g_177 ; <i32**> [#uses=1]
- %cmp = icmp ne i32** null, %tmp ; <i1> [#uses=1]
+ %tmp = load ptr, ptr @g_177 ; <ptr> [#uses=1]
+ %cmp = icmp ne ptr null, %tmp ; <i1> [#uses=1]
%conv = zext i1 %cmp to i32 ; <i32> [#uses=1]
%cmp1 = icmp sle i32 0, %conv ; <i1> [#uses=1]
ret i1 %cmp1
}
define i1 @PR16462_1() nounwind {
; CHECK-LABEL: @PR16462_1(
- ret i1 icmp sgt (i32 sext (i16 trunc (i32 select (i1 icmp eq (i32* getelementptr inbounds ([1 x i32], [1 x i32]* @a, i32 0, i32 0), i32* @d), i32 0, i32 1) to i16) to i32), i32 65535)
+ ret i1 icmp sgt (i32 sext (i16 trunc (i32 select (i1 icmp eq (ptr @a, ptr @d), i32 0, i32 1) to i16) to i32), i32 65535)
; CHECK: ret i1 false
}
define i1 @PR16462_2() nounwind {
; CHECK-LABEL: @PR16462_2(
- ret i1 icmp sgt (i32 sext (i16 trunc (i32 select (i1 icmp eq (i32* getelementptr inbounds ([1 x i32], [1 x i32]* @a, i32 0, i32 0), i32* @d), i32 0, i32 1) to i16) to i32), i32 42)
+ ret i1 icmp sgt (i32 sext (i16 trunc (i32 select (i1 icmp eq (ptr @a, ptr @d), i32 0, i32 1) to i16) to i32), i32 42)
; CHECK: ret i1 false
}
@.str = private constant [3 x i8] c"%s\00"
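; The sprintf with a plain "%s" format should simplify to a strcpy call, as
; the CHECK lines below show.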
-define void @CopyEventArg(%union.anon* %ev, i8* %src) nounwind {
+define void @CopyEventArg(ptr %ev, ptr %src) nounwind {
; CHECK-LABEL: @CopyEventArg(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CSTR:%.*]] = bitcast %union.anon* [[EV:%.*]] to i8*
-; CHECK-NEXT: [[STRCPY:%.*]] = call i8* @strcpy(i8* noundef nonnull dereferenceable(1) [[SRC:%.*]], i8* noundef nonnull dereferenceable(1) [[CSTR]])
+; CHECK-NEXT: [[STRCPY:%.*]] = call ptr @strcpy(ptr noundef nonnull dereferenceable(1) [[SRC:%.*]], ptr noundef nonnull dereferenceable(1) [[EV:%.*]])
; CHECK-NEXT: ret void
;
entry:
- %call = call i32 (i8*, i8*, ...) @sprintf(i8* %src, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i64 0, i64 0), %union.anon* %ev) nounwind
+ %call = call i32 (ptr, ptr, ...) @sprintf(ptr %src, ptr @.str, ptr %ev) nounwind
ret void
}
-declare i32 @sprintf(i8*, i8*, ...)
+declare i32 @sprintf(ptr, ptr, ...)
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
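; A getelementptr over the zero-sized type {} never moves the pointer, so it
; should fold away entirely (see the CHECK-NOT below).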
-define {}* @foo({}* %x, i32 %n) {
+define ptr @foo(ptr %x, i32 %n) {
; CHECK-LABEL: @foo(
; CHECK-NOT: getelementptr
- %p = getelementptr {}, {}* %x, i32 %n
- ret {}* %p
+ %p = getelementptr {}, ptr %x, i32 %n
+ ret ptr %p
}
-define i8* @bar(i64 %n, {{}, [0 x {[0 x i8]}]}* %p) {
+define ptr @bar(i64 %n, ptr %p) {
; CHECK-LABEL: @bar(
- %g = getelementptr {{}, [0 x {[0 x i8]}]}, {{}, [0 x {[0 x i8]}]}* %p, i64 %n, i32 1, i64 %n, i32 0, i64 %n
+ %g = getelementptr {{}, [0 x {[0 x i8]}]}, ptr %p, i64 %n, i32 1, i64 %n, i32 0, i64 %n
; CHECK: %p, i64 0, i32 1, i64 0, i32 0, i64 %n
- ret i8* %g
+ ret ptr %g
}
target triple = "x86_64-unknown-linux-gnu"
-define <4 x float> @m_387(i8* noalias nocapture %A, i8* nocapture %B, <4 x i1> %C) nounwind {
+define <4 x float> @m_387(ptr noalias nocapture %A, ptr nocapture %B, <4 x i1> %C) nounwind {
entry:
%movcsext20 = sext <4 x i1> %C to <4 x i32>
%tmp2389 = xor <4 x i32> %movcsext20, <i32 -1, i32 -1, i32 -1, i32 -1>
entry:
%a.addr = alloca <2 x i16>, align 4
%.compoundliteral = alloca <2 x i16>, align 4
- store <2 x i16> %a, <2 x i16>* %a.addr, align 4
- %tmp = load <2 x i16>, <2 x i16>* %a.addr, align 4
- store <2 x i16> zeroinitializer, <2 x i16>* %.compoundliteral
- %tmp1 = load <2 x i16>, <2 x i16>* %.compoundliteral
+ store <2 x i16> %a, ptr %a.addr, align 4
+ %tmp = load <2 x i16>, ptr %a.addr, align 4
+ store <2 x i16> zeroinitializer, ptr %.compoundliteral
+ %tmp1 = load <2 x i16>, ptr %.compoundliteral
%cmp = icmp uge <2 x i16> %tmp, %tmp1
%sext = sext <2 x i1> %cmp to <2 x i16>
ret <2 x i16> %sext
%tmp2 = add i32 %argc, 1879048192
%p = alloca i8
; CHECK: getelementptr
- %p1 = getelementptr i8, i8* %p, i32 %tmp1
+ %p1 = getelementptr i8, ptr %p, i32 %tmp1
; CHECK: getelementptr
- %p2 = getelementptr i8, i8* %p, i32 %tmp2
- %cmp = icmp ult i8* %p1, %p2
+ %p2 = getelementptr i8, ptr %p, i32 %tmp2
+ %cmp = icmp ult ptr %p1, %p2
br i1 %cmp, label %bbtrue, label %bbfalse
bbtrue: ; preds = %entry
ret i32 -1
;
%on_off.addr = alloca i32, align 4
%a = alloca i32, align 4
- store i32 %on_off, i32* %on_off.addr, align 4
- %tmp = load i32, i32* %on_off.addr, align 4
+ store i32 %on_off, ptr %on_off.addr, align 4
+ %tmp = load i32, ptr %on_off.addr, align 4
%sub = sub i32 1, %tmp
%mul = mul i32 %sub, -2
- store i32 %mul, i32* %a, align 4
- %tmp1 = load i32, i32* %a, align 4
+ store i32 %mul, ptr %a, align 4
+ %tmp1 = load i32, ptr %a, align 4
%conv = trunc i32 %tmp1 to i16
ret i16 %conv
}
%on_off.addr = alloca i32, align 4
%q.addr = alloca i32, align 4
%a = alloca i32, align 4
- store i32 %on_off, i32* %on_off.addr, align 4
- store i32 %q, i32* %q.addr, align 4
- %tmp = load i32, i32* %q.addr, align 4
- %tmp1 = load i32, i32* %on_off.addr, align 4
+ store i32 %on_off, ptr %on_off.addr, align 4
+ store i32 %q, ptr %q.addr, align 4
+ %tmp = load i32, ptr %q.addr, align 4
+ %tmp1 = load i32, ptr %on_off.addr, align 4
%sub = sub i32 %tmp, %tmp1
%mul = mul i32 %sub, -4
- store i32 %mul, i32* %a, align 4
- %tmp2 = load i32, i32* %a, align 4
+ store i32 %mul, ptr %a, align 4
+ %tmp2 = load i32, ptr %a, align 4
%conv = trunc i32 %tmp2 to i16
ret i16 %conv
}
;
%on_off.addr = alloca i32, align 4
%a = alloca i32, align 4
- store i32 %on_off, i32* %on_off.addr, align 4
- %tmp = load i32, i32* %on_off.addr, align 4
+ store i32 %on_off, ptr %on_off.addr, align 4
+ %tmp = load i32, ptr %on_off.addr, align 4
%sub = sub i32 7, %tmp
%mul = mul i32 %sub, -4
- store i32 %mul, i32* %a, align 4
- %tmp1 = load i32, i32* %a, align 4
+ store i32 %mul, ptr %a, align 4
+ %tmp1 = load i32, ptr %a, align 4
%conv = trunc i32 %tmp1 to i16
ret i16 %conv
}
%opaque_struct = type opaque
@G = external global [0 x %opaque_struct]
-declare void @foo(%opaque_struct*)
+declare void @foo(ptr)
define void @bar() {
- call void @foo(%opaque_struct* bitcast ([0 x %opaque_struct]* @G to %opaque_struct*))
+ call void @foo(ptr @G)
ret void
}
@.str = private unnamed_addr constant [35 x i8] c"\0Ain_range input (should be 0): %f\0A\00", align 1
@.str1 = external hidden unnamed_addr constant [35 x i8], align 1
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
define i64 @_Z8tempCastj(i32 %val) uwtable ssp {
entry:
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([35 x i8], [35 x i8]* @.str1, i64 0, i64 0), i32 %val)
+ %call = call i32 (ptr, ...) @printf(ptr @.str1, i32 %val)
%conv = uitofp i32 %val to double
- %call.i = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([35 x i8], [35 x i8]* @.str, i64 0, i64 0), double %conv)
+ %call.i = call i32 (ptr, ...) @printf(ptr @.str, double %conv)
%cmp.i = fcmp oge double %conv, -1.000000e+00
br i1 %cmp.i, label %land.rhs.i, label %if.end.critedge
; CHECK: br i1 true, label %land.rhs.i, label %if.end.critedge
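; uitofp never produces a negative value (or a NaN), so the 'oge -1.0'
; compare above folds to true; hence the br i1 true.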
; Derived from gcc.c-torture/execute/frame-address.c
-define i32 @func(i8* %c, i8* %f) nounwind uwtable readnone noinline ssp {
+define i32 @func(ptr %c, ptr %f) nounwind uwtable readnone noinline ssp {
; CHECK-LABEL: @func(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[D:%.*]] = alloca i8, align 1
-; CHECK-NEXT: store i8 0, i8* [[D]], align 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8* [[D]], [[C:%.*]]
+; CHECK-NEXT: store i8 0, ptr [[D]], align 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt ptr [[D]], [[C:%.*]]
; CHECK-NEXT: br i1 [[CMP]], label [[IF_ELSE:%.*]], label [[IF_THEN:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ule i8* [[D]], [[F:%.*]]
-; CHECK-NEXT: [[NOT_CMP1:%.*]] = icmp uge i8* [[C]], [[F]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ule ptr [[D]], [[F:%.*]]
+; CHECK-NEXT: [[NOT_CMP1:%.*]] = icmp uge ptr [[C]], [[F]]
; CHECK-NEXT: [[DOTCMP2:%.*]] = and i1 [[CMP2]], [[NOT_CMP1]]
; CHECK-NEXT: br label [[RETURN:%.*]]
; CHECK: if.else:
-; CHECK-NEXT: [[CMP5:%.*]] = icmp uge i8* [[D]], [[F]]
-; CHECK-NEXT: [[NOT_CMP3:%.*]] = icmp ule i8* [[C]], [[F]]
+; CHECK-NEXT: [[CMP5:%.*]] = icmp uge ptr [[D]], [[F]]
+; CHECK-NEXT: [[NOT_CMP3:%.*]] = icmp ule ptr [[C]], [[F]]
; CHECK-NEXT: [[DOTCMP5:%.*]] = and i1 [[CMP5]], [[NOT_CMP3]]
; CHECK-NEXT: br label [[RETURN]]
; CHECK: return:
;
entry:
%d = alloca i8, align 1
- store i8 0, i8* %d, align 1
- %cmp = icmp ugt i8* %d, %c
+ store i8 0, ptr %d, align 1
+ %cmp = icmp ugt ptr %d, %c
br i1 %cmp, label %if.else, label %if.then
if.then: ; preds = %entry
- %cmp2 = icmp ule i8* %d, %f
- %not.cmp1 = icmp uge i8* %c, %f
+ %cmp2 = icmp ule ptr %d, %f
+ %not.cmp1 = icmp uge ptr %c, %f
%.cmp2 = and i1 %cmp2, %not.cmp1
%land.ext = zext i1 %.cmp2 to i32
br label %return
if.else: ; preds = %entry
- %cmp5 = icmp uge i8* %d, %f
- %not.cmp3 = icmp ule i8* %c, %f
+ %cmp5 = icmp uge ptr %d, %f
+ %not.cmp3 = icmp ule ptr %c, %f
%.cmp5 = and i1 %cmp5, %not.cmp3
%land.ext7 = zext i1 %.cmp5 to i32
br label %return
ret i32 %retval.0
}
-define i32 @func_logical(i8* %c, i8* %f) nounwind uwtable readnone noinline ssp {
+define i32 @func_logical(ptr %c, ptr %f) nounwind uwtable readnone noinline ssp {
; CHECK-LABEL: @func_logical(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[D:%.*]] = alloca i8, align 1
-; CHECK-NEXT: store i8 0, i8* [[D]], align 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8* [[D]], [[C:%.*]]
+; CHECK-NEXT: store i8 0, ptr [[D]], align 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt ptr [[D]], [[C:%.*]]
; CHECK-NEXT: br i1 [[CMP]], label [[IF_ELSE:%.*]], label [[IF_THEN:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ule i8* [[D]], [[F:%.*]]
-; CHECK-NEXT: [[NOT_CMP1:%.*]] = icmp uge i8* [[C]], [[F]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ule ptr [[D]], [[F:%.*]]
+; CHECK-NEXT: [[NOT_CMP1:%.*]] = icmp uge ptr [[C]], [[F]]
; CHECK-NEXT: [[DOTCMP2:%.*]] = select i1 [[CMP2]], i1 [[NOT_CMP1]], i1 false
; CHECK-NEXT: br label [[RETURN:%.*]]
; CHECK: if.else:
-; CHECK-NEXT: [[CMP5:%.*]] = icmp uge i8* [[D]], [[F]]
-; CHECK-NEXT: [[NOT_CMP3:%.*]] = icmp ule i8* [[C]], [[F]]
+; CHECK-NEXT: [[CMP5:%.*]] = icmp uge ptr [[D]], [[F]]
+; CHECK-NEXT: [[NOT_CMP3:%.*]] = icmp ule ptr [[C]], [[F]]
; CHECK-NEXT: [[DOTCMP5:%.*]] = select i1 [[CMP5]], i1 [[NOT_CMP3]], i1 false
; CHECK-NEXT: br label [[RETURN]]
; CHECK: return:
;
entry:
%d = alloca i8, align 1
- store i8 0, i8* %d, align 1
- %cmp = icmp ugt i8* %d, %c
+ store i8 0, ptr %d, align 1
+ %cmp = icmp ugt ptr %d, %c
br i1 %cmp, label %if.else, label %if.then
if.then: ; preds = %entry
- %cmp2 = icmp ule i8* %d, %f
- %not.cmp1 = icmp uge i8* %c, %f
+ %cmp2 = icmp ule ptr %d, %f
+ %not.cmp1 = icmp uge ptr %c, %f
%.cmp2 = select i1 %cmp2, i1 %not.cmp1, i1 false
%land.ext = zext i1 %.cmp2 to i32
br label %return
if.else: ; preds = %entry
- %cmp5 = icmp uge i8* %d, %f
- %not.cmp3 = icmp ule i8* %c, %f
+ %cmp5 = icmp uge ptr %d, %f
+ %not.cmp3 = icmp ule ptr %c, %f
%.cmp5 = select i1 %cmp5, i1 %not.cmp3, i1 false
%land.ext7 = zext i1 %.cmp5 to i32
br label %return
define void @func() nounwind uwtable ssp {
entry:
- %0 = load i8, i8* @c, align 1
+ %0 = load i8, ptr @c, align 1
%conv = zext i8 %0 to i32
%or = or i32 %conv, 1
%conv1 = trunc i32 %or to i8
- store i8 %conv1, i8* @a, align 1
+ store i8 %conv1, ptr @a, align 1
%conv2 = zext i8 %conv1 to i32
%neg = xor i32 %conv2, -1
%and = and i32 1, %neg
%conv3 = trunc i32 %and to i8
- store i8 %conv3, i8* @b, align 1
- %1 = load i8, i8* @a, align 1
+ store i8 %conv3, ptr @b, align 1
+ %1 = load i8, ptr @a, align 1
%conv4 = zext i8 %1 to i32
%conv5 = zext i8 %conv3 to i32
%tobool = icmp ne i32 %conv4, 0
%land.ext = zext i1 %2 to i32
%mul = mul nsw i32 3, %land.ext
%conv9 = trunc i32 %mul to i8
- store i8 %conv9, i8* @a, align 1
+ store i8 %conv9, ptr @a, align 1
ret void
}
; CHECK-LABEL: @func(
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
; <rdar://problem/10889741>
-define void @func(double %r, double %g, double %b, double* %outH, double* %outS, double* %outL) nounwind uwtable ssp {
+define void @func(double %r, double %g, double %b, ptr %outH, ptr %outS, ptr %outL) nounwind uwtable ssp {
bb:
%tmp = alloca double, align 8
%tmp1 = alloca double, align 8
%tmp2 = alloca double, align 8
- store double %r, double* %tmp, align 8
- store double %g, double* %tmp1, align 8
- store double %b, double* %tmp2, align 8
+ store double %r, ptr %tmp, align 8
+ store double %g, ptr %tmp1, align 8
+ store double %b, ptr %tmp2, align 8
%tmp3 = fcmp ogt double %r, %g
br i1 %tmp3, label %bb4, label %bb8
br label %bb12
bb12: ; preds = %bb11, %bb10, %bb7, %bb6
- %max.0 = phi double* [ %tmp, %bb6 ], [ %tmp2, %bb7 ], [ %tmp1, %bb10 ], [ %tmp2, %bb11 ]
-; CHECK: %tmp13 = load double, double* %tmp, align 8
-; CHECK: %tmp14 = load double, double* %tmp1, align 8
+ %max.0 = phi ptr [ %tmp, %bb6 ], [ %tmp2, %bb7 ], [ %tmp1, %bb10 ], [ %tmp2, %bb11 ]
+; CHECK: %tmp13 = load double, ptr %tmp, align 8
+; CHECK: %tmp14 = load double, ptr %tmp1, align 8
; CHECK: %tmp15 = fcmp olt double %tmp13, %tmp14
- %tmp13 = load double, double* %tmp, align 8
- %tmp14 = load double, double* %tmp1, align 8
+ %tmp13 = load double, ptr %tmp, align 8
+ %tmp14 = load double, ptr %tmp1, align 8
%tmp15 = fcmp olt double %tmp13, %tmp14
br i1 %tmp15, label %bb16, label %bb21
bb16: ; preds = %bb12
- %tmp17 = load double, double* %tmp2, align 8
+ %tmp17 = load double, ptr %tmp2, align 8
%tmp18 = fcmp olt double %tmp13, %tmp17
br i1 %tmp18, label %bb19, label %bb20
br label %bb26
bb21: ; preds = %bb12
- %tmp22 = load double, double* %tmp2, align 8
+ %tmp22 = load double, ptr %tmp2, align 8
%tmp23 = fcmp olt double %tmp14, %tmp22
br i1 %tmp23, label %bb24, label %bb25
br label %bb26
bb26: ; preds = %bb25, %bb24, %bb20, %bb19
- %min.0 = phi double* [ %tmp, %bb19 ], [ %tmp2, %bb20 ], [ %tmp1, %bb24 ], [ %tmp2, %bb25 ]
-; CHECK: %tmp27 = load double, double* %min.0, align 8
-; CHECK: %tmp28 = load double, double* %max.0
+ %min.0 = phi ptr [ %tmp, %bb19 ], [ %tmp2, %bb20 ], [ %tmp1, %bb24 ], [ %tmp2, %bb25 ]
+; CHECK: %tmp27 = load double, ptr %min.0, align 8
+; CHECK: %tmp28 = load double, ptr %max.0
; CHECK: %tmp29 = fadd double %tmp27, %tmp28
- %tmp27 = load double, double* %min.0, align 8
- %tmp28 = load double, double* %max.0
+ %tmp27 = load double, ptr %min.0, align 8
+ %tmp28 = load double, ptr %max.0
%tmp29 = fadd double %tmp27, %tmp28
%tmp30 = fdiv double %tmp29, 2.000000e+00
- store double %tmp30, double* %outL
- %tmp31 = load double, double* %min.0
- %tmp32 = load double, double* %max.0
+ store double %tmp30, ptr %outL
+ %tmp31 = load double, ptr %min.0
+ %tmp32 = load double, ptr %max.0
%tmp33 = fcmp oeq double %tmp31, %tmp32
br i1 %tmp33, label %bb34, label %bb35
bb34: ; preds = %bb26
- store double 0.000000e+00, double* %outS
- store double 0.000000e+00, double* %outH
+ store double 0.000000e+00, ptr %outS
+ store double 0.000000e+00, ptr %outH
br label %bb81
bb35: ; preds = %bb26
bb38: ; preds = %bb35
%tmp39 = fadd double %tmp32, %tmp31
%tmp40 = fdiv double %tmp37, %tmp39
- store double %tmp40, double* %outS
+ store double %tmp40, ptr %outS
br label %bb45
bb41: ; preds = %bb35
%tmp42 = fsub double 2.000000e+00, %tmp32
%tmp43 = fsub double %tmp42, %tmp31
%tmp44 = fdiv double %tmp37, %tmp43
- store double %tmp44, double* %outS
+ store double %tmp44, ptr %outS
br label %bb45
bb45: ; preds = %bb41, %bb38
- %tmp46 = icmp eq double* %max.0, %tmp
+ %tmp46 = icmp eq ptr %max.0, %tmp
br i1 %tmp46, label %bb47, label %bb55
bb47: ; preds = %bb45
- %tmp48 = load double, double* %tmp1, align 8
- %tmp49 = load double, double* %tmp2, align 8
+ %tmp48 = load double, ptr %tmp1, align 8
+ %tmp49 = load double, ptr %tmp2, align 8
%tmp50 = fsub double %tmp48, %tmp49
- %tmp51 = load double, double* %max.0
- %tmp52 = load double, double* %min.0
+ %tmp51 = load double, ptr %max.0
+ %tmp52 = load double, ptr %min.0
%tmp53 = fsub double %tmp51, %tmp52
%tmp54 = fdiv double %tmp50, %tmp53
- store double %tmp54, double* %outH
+ store double %tmp54, ptr %outH
br label %bb75
bb55: ; preds = %bb45
- %tmp56 = icmp eq double* %max.0, %tmp1
+ %tmp56 = icmp eq ptr %max.0, %tmp1
br i1 %tmp56, label %bb57, label %bb66
bb57: ; preds = %bb55
- %tmp58 = load double, double* %tmp2, align 8
- %tmp59 = load double, double* %tmp, align 8
+ %tmp58 = load double, ptr %tmp2, align 8
+ %tmp59 = load double, ptr %tmp, align 8
%tmp60 = fsub double %tmp58, %tmp59
- %tmp61 = load double, double* %max.0
- %tmp62 = load double, double* %min.0
+ %tmp61 = load double, ptr %max.0
+ %tmp62 = load double, ptr %min.0
%tmp63 = fsub double %tmp61, %tmp62
%tmp64 = fdiv double %tmp60, %tmp63
%tmp65 = fadd double 2.000000e+00, %tmp64
- store double %tmp65, double* %outH
+ store double %tmp65, ptr %outH
br label %bb75
bb66: ; preds = %bb55
- %tmp67 = load double, double* %tmp, align 8
- %tmp68 = load double, double* %tmp1, align 8
+ %tmp67 = load double, ptr %tmp, align 8
+ %tmp68 = load double, ptr %tmp1, align 8
%tmp69 = fsub double %tmp67, %tmp68
- %tmp70 = load double, double* %max.0
- %tmp71 = load double, double* %min.0
+ %tmp70 = load double, ptr %max.0
+ %tmp71 = load double, ptr %min.0
%tmp72 = fsub double %tmp70, %tmp71
%tmp73 = fdiv double %tmp69, %tmp72
%tmp74 = fadd double 4.000000e+00, %tmp73
- store double %tmp74, double* %outH
+ store double %tmp74, ptr %outH
br label %bb75
bb75: ; preds = %bb66, %bb57, %bb47
- %tmp76 = load double, double* %outH
+ %tmp76 = load double, ptr %outH
%tmp77 = fdiv double %tmp76, 6.000000e+00
- store double %tmp77, double* %outH
+ store double %tmp77, ptr %outH
%tmp78 = fcmp olt double %tmp77, 0.000000e+00
br i1 %tmp78, label %bb79, label %bb81
bb79: ; preds = %bb75
%tmp80 = fadd double %tmp77, 1.000000e+00
- store double %tmp80, double* %outH
+ store double %tmp80, ptr %outH
br label %bb81
bb81: ; preds = %bb79, %bb75, %bb34
ret void
}
@test = constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
define i64 @foo() {
- %ret = load i64, i64* bitcast (i8* getelementptr (i8, i8* bitcast ([4 x i32]* @test to i8*), i64 2) to i64*), align 1
+ %ret = load i64, ptr getelementptr (i8, ptr @test, i64 2), align 1
ret i64 %ret
; 0x00030000_00020000 in [01 00/00 00 02 00 00 00 03 00/00 00 04 00 00 00]
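; i.e. 0x0003000000020000 = 3*2^48 + 2*2^16 = 844424930131968 + 131072
;    = 844424930263040 on a little-endian target.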
; LE: ret i64 844424930263040
}
; CHECK: addrspacecast
@base = internal unnamed_addr addrspace(3) global [16 x i32] zeroinitializer, align 16
-declare void @foo(i32*)
+declare void @foo(ptr)
define void @test() nounwind {
- call void @foo(i32* getelementptr (i32, i32* addrspacecast ([16 x i32] addrspace(3)* @base to i32*), i64 2147483647)) nounwind
+ call void @foo(ptr getelementptr (i32, ptr addrspacecast (ptr addrspace(3) @base to ptr), i64 2147483647)) nounwind
ret void
}
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
-@x = global i8* null, align 8
-@y = global i8* null, align 8
+@x = global ptr null, align 8
+@y = global ptr null, align 8
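; Both allocas below are zero-sized; this appears to check that the align
; 1024 one is the one that survives instcombine (see CHECK-NEXT above @f).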
; CHECK-LABEL: @f(
; CHECK-NEXT: alloca [0 x i8], align 1024
define void @f() {
%1 = alloca [0 x i8], align 1
%2 = alloca [0 x i8], align 1024
- %3 = getelementptr inbounds [0 x i8], [0 x i8]* %1, i64 0, i64 0
- %4 = getelementptr inbounds [0 x i8], [0 x i8]* %2, i64 0, i64 0
- store i8* %3, i8** @x, align 8
- store i8* %4, i8** @y, align 8
+ store ptr %1, ptr @x, align 8
+ store ptr %2, ptr @y, align 8
ret void
}
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
-%struct.hoge = type { double*, double*, double*, double** }
+%struct.hoge = type { ptr, ptr, ptr, ptr }
-define void @widget(%struct.hoge* nocapture %arg, i1 %c1, i1 %c2, i1 %c3, i1 %c4, i1 %c5) {
+define void @widget(ptr nocapture %arg, i1 %c1, i1 %c2, i1 %c3, i1 %c4, i1 %c5) {
bb:
- %tmp = getelementptr inbounds %struct.hoge, %struct.hoge* %arg, i64 0, i32 0
br i1 %c1, label %bb1, label %bb17
bb1: ; preds = %bb
br label %bb17
bb3: ; preds = %bb1
- %tmp4 = bitcast double** %tmp to <2 x double*>*
- %tmp5 = load <2 x double*>, <2 x double*>* %tmp4, align 8
- %tmp6 = ptrtoint <2 x double*> %tmp5 to <2 x i64>
+ %tmp5 = load <2 x ptr>, ptr %arg, align 8
+ %tmp6 = ptrtoint <2 x ptr> %tmp5 to <2 x i64>
%tmp7 = sub <2 x i64> zeroinitializer, %tmp6
%tmp8 = ashr exact <2 x i64> %tmp7, <i64 3, i64 3>
%tmp9 = extractelement <2 x i64> %tmp8, i32 0
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-define <4 x i32> @foo(<4 x i32*>* %in) {
- %t17 = load <4 x i32*>, <4 x i32*>* %in, align 8
- %t18 = icmp eq <4 x i32*> %t17, zeroinitializer
+define <4 x i32> @foo(ptr %in) {
+ %t17 = load <4 x ptr>, ptr %in, align 8
+ %t18 = icmp eq <4 x ptr> %t17, zeroinitializer
%t19 = zext <4 x i1> %t18 to <4 x i32>
ret <4 x i32> %t19
}
define i32 @function(i32 %x) nounwind {
entry:
%xor = xor i32 %x, 1
- store volatile i32 %xor, i32* inttoptr (i64 1 to i32*), align 4
- %or4 = or i32 or (i32 zext (i1 icmp eq (i32* @g, i32* null) to i32), i32 1), %xor
+ store volatile i32 %xor, ptr inttoptr (i64 1 to ptr), align 4
+ %or4 = or i32 or (i32 zext (i1 icmp eq (ptr @g, ptr null) to i32), i32 1), %xor
ret i32 %or4
}
; CHECK-LABEL: define i32 @function(
; CHECK-LABEL: @function(
; CHECK-NEXT: entry:
; CHECK-NEXT: %retval = alloca %struct._my_struct, align 8
-; CHECK-NOT: bitcast i96* %retval to %struct._my_struct*
+; CHECK-NOT: bitcast ptr %retval to ptr
entry:
%retval = alloca %struct._my_struct, align 8
- %k.sroa.0.0.copyload = load i96, i96* bitcast (%struct._my_struct* @initval to i96*), align 1
- %k.sroa.1.12.copyload = load i32, i32* bitcast ([4 x i8]* getelementptr inbounds (%struct._my_struct, %struct._my_struct* @initval, i64 0, i32 1) to i32*), align 1
+ %k.sroa.0.0.copyload = load i96, ptr @initval, align 1
+ %k.sroa.1.12.copyload = load i32, ptr getelementptr inbounds (%struct._my_struct, ptr @initval, i64 0, i32 1), align 1
%0 = zext i32 %x to i96
%bf.value = shl nuw nsw i96 %0, 6
%bf.clear = and i96 %k.sroa.0.0.copyload, -288230376151711744
%bf.clear4 = or i96 %bf.shl3, %bf.value.masked
%bf.set5 = or i96 %bf.clear4, %bf.value8
%bf.set10 = or i96 %bf.set5, %bf.clear
- %retval.0.cast7 = bitcast %struct._my_struct* %retval to i96*
- store i96 %bf.set10, i96* %retval.0.cast7, align 8
- %retval.12.idx8 = getelementptr inbounds %struct._my_struct, %struct._my_struct* %retval, i64 0, i32 1
- %retval.12.cast9 = bitcast [4 x i8]* %retval.12.idx8 to i32*
- store i32 %k.sroa.1.12.copyload, i32* %retval.12.cast9, align 4
+ store i96 %bf.set10, ptr %retval, align 8
+ %retval.12.idx8 = getelementptr inbounds %struct._my_struct, ptr %retval, i64 0, i32 1
+ store i32 %k.sroa.1.12.copyload, ptr %retval.12.idx8, align 4
%trunc = trunc i96 %bf.set10 to i64
%.fca.0.insert = insertvalue { i64, i64 } undef, i64 %trunc, 0
- %retval.8.idx12 = getelementptr inbounds %struct._my_struct, %struct._my_struct* %retval, i64 0, i32 0, i64 8
- %retval.8.cast13 = bitcast i8* %retval.8.idx12 to i64*
- %retval.8.load14 = load i64, i64* %retval.8.cast13, align 8
+ %retval.8.idx12 = getelementptr inbounds %struct._my_struct, ptr %retval, i64 0, i32 0, i64 8
+ %retval.8.load14 = load i64, ptr %retval.8.idx12, align 8
%.fca.1.insert = insertvalue { i64, i64 } %.fca.0.insert, i64 %retval.8.load14, 1
ret { i64, i64 } %.fca.1.insert
}
!0 = !{i16 0, i16 32768} ; [0, 32767]
!1 = !{i16 0, i16 32769} ; [0, 32768]
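; With both summands bounded by [0, 32767], an i16 add cannot wrap unsigned
; (32767 + 32767 = 65534 <= 65535), so it can be flagged nuw; once a range
; admits 32768 that proof fails, hence the plain 'add' in the second test.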
-define i16 @add_bounded_values(i16 %a, i16 %b) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i16 @add_bounded_values(i16 %a, i16 %b) personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: @add_bounded_values(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[C:%.*]] = call i16 @bounded(i16 [[A:%.*]]), !range [[RNG0:![0-9]+]]
; CHECK-NEXT: [[E:%.*]] = add nuw i16 [[C]], [[D]]
; CHECK-NEXT: ret i16 [[E]]
; CHECK: lpad:
-; CHECK-NEXT: [[TMP0:%.*]] = landingpad { i8*, i32 }
-; CHECK-NEXT: filter [0 x i8*] zeroinitializer
+; CHECK-NEXT: [[TMP0:%.*]] = landingpad { ptr, i32 }
+; CHECK-NEXT: filter [0 x ptr] zeroinitializer
; CHECK-NEXT: ret i16 42
;
entry:
%e = add i16 %c, %d
ret i16 %e
lpad:
- %0 = landingpad { i8*, i32 }
- filter [0 x i8*] zeroinitializer
+ %0 = landingpad { ptr, i32 }
+ filter [0 x ptr] zeroinitializer
ret i16 42
}
-define i16 @add_bounded_values_2(i16 %a, i16 %b) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i16 @add_bounded_values_2(i16 %a, i16 %b) personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: @add_bounded_values_2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[C:%.*]] = call i16 @bounded(i16 [[A:%.*]]), !range [[RNG1:![0-9]+]]
; CHECK-NEXT: [[E:%.*]] = add i16 [[C]], [[D]]
; CHECK-NEXT: ret i16 [[E]]
; CHECK: lpad:
-; CHECK-NEXT: [[TMP0:%.*]] = landingpad { i8*, i32 }
-; CHECK-NEXT: filter [0 x i8*] zeroinitializer
+; CHECK-NEXT: [[TMP0:%.*]] = landingpad { ptr, i32 }
+; CHECK-NEXT: filter [0 x ptr] zeroinitializer
; CHECK-NEXT: ret i16 42
;
entry:
%e = add i16 %c, %d
ret i16 %e
lpad:
- %0 = landingpad { i8*, i32 }
- filter [0 x i8*] zeroinitializer
+ %0 = landingpad { ptr, i32 }
+ filter [0 x ptr] zeroinitializer
ret i16 42
}
; Instcombine should propagate the load through the select instructions so
; that mem2reg can eliminate the remaining allocas and memory traffic.
-define void @_Z5test1RiS_(i32* %x, i32* %y) {
+define void @_Z5test1RiS_(ptr %x, ptr %y) {
; CHECK-LABEL: @_Z5test1RiS_(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP_1_I:%.*]] = load i32, i32* [[Y:%.*]], align 4
-; CHECK-NEXT: [[TMP_3_I:%.*]] = load i32, i32* [[X:%.*]], align 4
+; CHECK-NEXT: [[TMP_1_I:%.*]] = load i32, ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[TMP_3_I:%.*]] = load i32, ptr [[X:%.*]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.smin.i32(i32 [[TMP_1_I]], i32 [[TMP_3_I]])
-; CHECK-NEXT: store i32 [[TMP0]], i32* [[X]], align 4
+; CHECK-NEXT: store i32 [[TMP0]], ptr [[X]], align 4
; CHECK-NEXT: ret void
;
entry:
- %tmp.1.i = load i32, i32* %y ; <i32> [#uses=1]
- %tmp.3.i = load i32, i32* %x ; <i32> [#uses=1]
+ %tmp.1.i = load i32, ptr %y ; <i32> [#uses=1]
+ %tmp.3.i = load i32, ptr %x ; <i32> [#uses=1]
%tmp.4.i = icmp slt i32 %tmp.1.i, %tmp.3.i ; <i1> [#uses=1]
- %retval.i = select i1 %tmp.4.i, i32* %y, i32* %x ; <i32*> [#uses=1]
- %tmp.4 = load i32, i32* %retval.i ; <i32> [#uses=1]
- store i32 %tmp.4, i32* %x
+ %retval.i = select i1 %tmp.4.i, ptr %y, ptr %x ; <ptr> [#uses=1]
+ %tmp.4 = load i32, ptr %retval.i ; <i32> [#uses=1]
+ store i32 %tmp.4, ptr %x
ret void
}
-define void @_Z5test2RiS_(i32* %x, i32* %y) {
+define void @_Z5test2RiS_(ptr %x, ptr %y) {
; CHECK-LABEL: @_Z5test2RiS_(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP_2:%.*]] = load i32, i32* [[X:%.*]], align 4
-; CHECK-NEXT: [[TMP_3_I:%.*]] = load i32, i32* [[Y:%.*]], align 4
+; CHECK-NEXT: [[TMP_2:%.*]] = load i32, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[TMP_3_I:%.*]] = load i32, ptr [[Y:%.*]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP_2]], i32 [[TMP_3_I]])
-; CHECK-NEXT: store i32 [[TMP0]], i32* [[Y]], align 4
+; CHECK-NEXT: store i32 [[TMP0]], ptr [[Y]], align 4
; CHECK-NEXT: ret void
;
entry:
- %tmp.0 = alloca i32 ; <i32*> [#uses=2]
- %tmp.2 = load i32, i32* %x ; <i32> [#uses=2]
- store i32 %tmp.2, i32* %tmp.0
- %tmp.3.i = load i32, i32* %y ; <i32> [#uses=1]
+ %tmp.0 = alloca i32 ; <ptr> [#uses=2]
+ %tmp.2 = load i32, ptr %x ; <i32> [#uses=2]
+ store i32 %tmp.2, ptr %tmp.0
+ %tmp.3.i = load i32, ptr %y ; <i32> [#uses=1]
%tmp.4.i = icmp slt i32 %tmp.2, %tmp.3.i ; <i1> [#uses=1]
- %retval.i = select i1 %tmp.4.i, i32* %y, i32* %tmp.0 ; <i32*> [#uses=1]
- %tmp.6 = load i32, i32* %retval.i ; <i32> [#uses=1]
- store i32 %tmp.6, i32* %y
+ %retval.i = select i1 %tmp.4.i, ptr %y, ptr %tmp.0 ; <ptr> [#uses=1]
+ %tmp.6 = load i32, ptr %retval.i ; <i32> [#uses=1]
+ store i32 %tmp.6, ptr %y
ret void
}
; RUN: opt -S -passes='inline,instcombine' %s | FileCheck %s --check-prefixes=CHECK,CHECK-INLINE
; RUN: opt -S -passes=instcombine %s | FileCheck %s --check-prefixes=CHECK,CHECK-NOINLINE
-define i8* @widen_align_from_allocalign_callsite() {
+define ptr @widen_align_from_allocalign_callsite() {
; CHECK-LABEL: @widen_align_from_allocalign_callsite(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call align 64 i8* @my_aligned_alloc_2(i32 noundef 320, i32 allocalign noundef 64)
-; CHECK-NEXT: ret i8* [[CALL]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call align 64 ptr @my_aligned_alloc_2(i32 noundef 320, i32 allocalign noundef 64)
+; CHECK-NEXT: ret ptr [[CALL]]
;
entry:
- %call = tail call align 16 i8* @my_aligned_alloc_2(i32 noundef 320, i32 allocalign noundef 64)
- ret i8* %call
+ %call = tail call align 16 ptr @my_aligned_alloc_2(i32 noundef 320, i32 allocalign noundef 64)
+ ret ptr %call
}
-define i8* @widen_align_from_allocalign() {
+define ptr @widen_align_from_allocalign() {
; CHECK-LABEL: @widen_align_from_allocalign(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call align 64 i8* @my_aligned_alloc(i32 noundef 320, i32 noundef 64)
-; CHECK-NEXT: ret i8* [[CALL]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call align 64 ptr @my_aligned_alloc(i32 noundef 320, i32 noundef 64)
+; CHECK-NEXT: ret ptr [[CALL]]
;
entry:
- %call = tail call align 16 i8* @my_aligned_alloc(i32 noundef 320, i32 noundef 64)
- ret i8* %call
+ %call = tail call align 16 ptr @my_aligned_alloc(i32 noundef 320, i32 noundef 64)
+ ret ptr %call
}
-define i8* @dont_narrow_align_from_allocalign() {
+define ptr @dont_narrow_align_from_allocalign() {
; CHECK-LABEL: @dont_narrow_align_from_allocalign(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call align 16 i8* @my_aligned_alloc(i32 noundef 320, i32 noundef 8)
-; CHECK-NEXT: ret i8* [[CALL]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call align 16 ptr @my_aligned_alloc(i32 noundef 320, i32 noundef 8)
+; CHECK-NEXT: ret ptr [[CALL]]
;
entry:
- %call = tail call align 16 i8* @my_aligned_alloc(i32 noundef 320, i32 noundef 8)
- ret i8* %call
+ %call = tail call align 16 ptr @my_aligned_alloc(i32 noundef 320, i32 noundef 8)
+ ret ptr %call
}
-define i8* @my_aligned_alloc_3(i32 noundef %foo, i32 allocalign %alignment) {
+define ptr @my_aligned_alloc_3(i32 noundef %foo, i32 allocalign %alignment) {
; CHECK-LABEL: @my_aligned_alloc_3(
-; CHECK-NEXT: [[CALL:%.*]] = tail call i8* @my_aligned_alloc_2(i32 noundef [[FOO:%.*]], i32 noundef [[ALIGNMENT:%.*]])
-; CHECK-NEXT: ret i8* [[CALL]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @my_aligned_alloc_2(i32 noundef [[FOO:%.*]], i32 noundef [[ALIGNMENT:%.*]])
+; CHECK-NEXT: ret ptr [[CALL]]
;
- %call = tail call i8* @my_aligned_alloc_2(i32 noundef %foo, i32 noundef %alignment)
- ret i8* %call
+ %call = tail call ptr @my_aligned_alloc_2(i32 noundef %foo, i32 noundef %alignment)
+ ret ptr %call
}
; -inline is able to make my_aligned_alloc_3's arguments disappear and to
; figure out the `align 128` on the return value once the call is made directly
; on my_aligned_alloc_2. Note that this is a simplified version of what happens
; with _mm_malloc, which calls posix_memalign.
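; In the CHECK-NOINLINE run the call to @my_aligned_alloc_3 survives, and its
; allocalign parameter is what lets instcombine put `align 128` on that call.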
-define i8* @allocalign_disappears() {
+define ptr @allocalign_disappears() {
; CHECK-INLINE-LABEL: @allocalign_disappears(
-; CHECK-INLINE-NEXT: [[CALL_I:%.*]] = tail call i8* @my_aligned_alloc_2(i32 noundef 42, i32 noundef 128)
-; CHECK-INLINE-NEXT: ret i8* [[CALL_I]]
+; CHECK-INLINE-NEXT: [[CALL_I:%.*]] = tail call ptr @my_aligned_alloc_2(i32 noundef 42, i32 noundef 128)
+; CHECK-INLINE-NEXT: ret ptr [[CALL_I]]
;
; CHECK-NOINLINE-LABEL: @allocalign_disappears(
-; CHECK-NOINLINE-NEXT: [[CALL:%.*]] = tail call align 128 i8* @my_aligned_alloc_3(i32 42, i32 128)
-; CHECK-NOINLINE-NEXT: ret i8* [[CALL]]
+; CHECK-NOINLINE-NEXT: [[CALL:%.*]] = tail call align 128 ptr @my_aligned_alloc_3(i32 42, i32 128)
+; CHECK-NOINLINE-NEXT: ret ptr [[CALL]]
;
- %call = tail call i8* @my_aligned_alloc_3(i32 42, i32 128)
- ret i8* %call
+ %call = tail call ptr @my_aligned_alloc_3(i32 42, i32 128)
+ ret ptr %call
}
-declare i8* @my_aligned_alloc(i32 noundef, i32 allocalign noundef)
-declare i8* @my_aligned_alloc_2(i32 noundef, i32 noundef)
+declare ptr @my_aligned_alloc(i32 noundef, i32 allocalign noundef)
+declare ptr @my_aligned_alloc_2(i32 noundef, i32 noundef)
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
target datalayout = "e-p:32:32"
-define i32* @test(i32* %P) {
- %V = ptrtoint i32* %P to i32 ; <i32> [#uses=1]
- %P2 = inttoptr i32 %V to i32* ; <i32*> [#uses=1]
- ret i32* %P2
-; CHECK: ret i32* %P
+define ptr @test(ptr %P) {
+ %V = ptrtoint ptr %P to i32 ; <i32> [#uses=1]
+ %P2 = inttoptr i32 %V to ptr ; <ptr> [#uses=1]
+ ret ptr %P2
+; CHECK: ret ptr %P
}
@T1 = external constant i32
@T2 = external constant i32
@T3 = external constant i32
-declare i32 @generic_personality(i32, i64, i8*, i8*)
-declare i32 @__gxx_personality_v0(i32, i64, i8*, i8*)
-declare i32 @__objc_personality_v0(i32, i64, i8*, i8*)
+declare i32 @generic_personality(i32, i64, ptr, ptr)
+declare i32 @__gxx_personality_v0(i32, i64, ptr, ptr)
+declare i32 @__objc_personality_v0(i32, i64, ptr, ptr)
declare i32 @__C_specific_handler(...)
declare void @bar()
-define void @foo_generic() personality i32 (i32, i64, i8*, i8*)* @generic_personality {
+define void @foo_generic() personality ptr @generic_personality {
; CHECK-LABEL: @foo_generic(
invoke void @bar()
to label %cont.a unwind label %lpad.a
cont.a:
  invoke void @bar()
          to label %cont.b unwind label %lpad.b
cont.b:
  invoke void @bar()
          to label %cont.c unwind label %lpad.c
cont.c:
  invoke void @bar()
          to label %cont.d unwind label %lpad.d
cont.d:
  invoke void @bar()
          to label %cont.e unwind label %lpad.e
cont.e:
  invoke void @bar()
          to label %cont.f unwind label %lpad.f
cont.f:
  invoke void @bar()
          to label %cont.g unwind label %lpad.g
cont.g:
  invoke void @bar()
          to label %cont.h unwind label %lpad.h
cont.h:
  invoke void @bar()
          to label %cont.i unwind label %lpad.i
cont.i:
  ret void
lpad.a:
- %a = landingpad { i8*, i32 }
- catch i32* @T1
- catch i32* @T2
- catch i32* @T1
- catch i32* @T2
+ %a = landingpad { ptr, i32 }
+ catch ptr @T1
+ catch ptr @T2
+ catch ptr @T1
+ catch ptr @T2
unreachable
; CHECK: %a = landingpad
; CHECK-NEXT: @T1
; CHECK-NEXT: unreachable
lpad.b:
- %b = landingpad { i8*, i32 }
- filter [0 x i32*] zeroinitializer
- catch i32* @T1
+ %b = landingpad { ptr, i32 }
+ filter [0 x ptr] zeroinitializer
+ catch ptr @T1
unreachable
; CHECK: %b = landingpad
; CHECK-NEXT: filter
; CHECK-NEXT: unreachable
lpad.c:
- %c = landingpad { i8*, i32 }
- catch i32* @T1
- filter [1 x i32*] [i32* @T1]
- catch i32* @T2
+ %c = landingpad { ptr, i32 }
+ catch ptr @T1
+ filter [1 x ptr] [ptr @T1]
+ catch ptr @T2
unreachable
; Caught types should not be removed from filters
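; (A filter lists the only typeinfos that may propagate out of the landing pad;
; dropping @T1 just because an earlier clause catches it would widen what the
; filter rejects, changing EH semantics.)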
; CHECK: %c = landingpad
-; CHECK-NEXT: catch i32* @T1
-; CHECK-NEXT: filter [1 x i32*] [i32* @T1]
-; CHECK-NEXT: catch i32* @T2
+; CHECK-NEXT: catch ptr @T1
+; CHECK-NEXT: filter [1 x ptr] [ptr @T1]
+; CHECK-NEXT: catch ptr @T2
; CHECK-NEXT: unreachable
lpad.d:
- %d = landingpad { i8*, i32 }
- filter [3 x i32*] zeroinitializer
+ %d = landingpad { ptr, i32 }
+ filter [3 x ptr] zeroinitializer
unreachable
; CHECK: %d = landingpad
-; CHECK-NEXT: filter [1 x i32*] zeroinitializer
+; CHECK-NEXT: filter [1 x ptr] zeroinitializer
; CHECK-NEXT: unreachable
lpad.e:
- %e = landingpad { i8*, i32 }
- catch i32* @T1
- filter [3 x i32*] [i32* @T1, i32* @T2, i32* @T2]
+ %e = landingpad { ptr, i32 }
+ catch ptr @T1
+ filter [3 x ptr] [ptr @T1, ptr @T2, ptr @T2]
unreachable
; Caught types should not be removed from filters
; CHECK: %e = landingpad
-; CHECK-NEXT: catch i32* @T1
-; CHECK-NEXT: filter [2 x i32*] [i32* @T1, i32* @T2]
+; CHECK-NEXT: catch ptr @T1
+; CHECK-NEXT: filter [2 x ptr] [ptr @T1, ptr @T2]
; CHECK-NEXT: unreachable
lpad.f:
- %f = landingpad { i8*, i32 }
- filter [2 x i32*] [i32* @T2, i32* @T1]
- filter [1 x i32*] [i32* @T1]
+ %f = landingpad { ptr, i32 }
+ filter [2 x ptr] [ptr @T2, ptr @T1]
+ filter [1 x ptr] [ptr @T1]
unreachable
; CHECK: %f = landingpad
-; CHECK-NEXT: filter [1 x i32*] [i32* @T1]
+; CHECK-NEXT: filter [1 x ptr] [ptr @T1]
; CHECK-NEXT: unreachable
lpad.g:
- %g = landingpad { i8*, i32 }
- filter [1 x i32*] [i32* @T1]
- catch i32* @T3
- filter [2 x i32*] [i32* @T2, i32* @T1]
+ %g = landingpad { ptr, i32 }
+ filter [1 x ptr] [ptr @T1]
+ catch ptr @T3
+ filter [2 x ptr] [ptr @T2, ptr @T1]
unreachable
; CHECK: %g = landingpad
-; CHECK-NEXT: filter [1 x i32*] [i32* @T1]
-; CHECK-NEXT: catch i32* @T3
+; CHECK-NEXT: filter [1 x ptr] [ptr @T1]
+; CHECK-NEXT: catch ptr @T3
; CHECK-NEXT: unreachable
lpad.h:
- %h = landingpad { i8*, i32 }
- filter [2 x i32*] [i32* @T1, i32* null]
- filter [1 x i32*] zeroinitializer
+ %h = landingpad { ptr, i32 }
+ filter [2 x ptr] [ptr @T1, ptr null]
+ filter [1 x ptr] zeroinitializer
unreachable
; CHECK: %h = landingpad
-; CHECK-NEXT: filter [1 x i32*] zeroinitializer
+; CHECK-NEXT: filter [1 x ptr] zeroinitializer
; CHECK-NEXT: unreachable
lpad.i:
- %i = landingpad { i8*, i32 }
+ %i = landingpad { ptr, i32 }
cleanup
- filter [0 x i32*] zeroinitializer
+ filter [0 x ptr] zeroinitializer
unreachable
; CHECK: %i = landingpad
; CHECK-NEXT: filter
; CHECK-NEXT: unreachable
}
-define void @foo_cxx() personality i32 (i32, i64, i8*, i8*)* @__gxx_personality_v0 {
+define void @foo_cxx() personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: @foo_cxx(
invoke void @bar()
to label %cont.a unwind label %lpad.a
cont.a:
  invoke void @bar()
          to label %cont.b unwind label %lpad.b
cont.b:
  invoke void @bar()
          to label %cont.c unwind label %lpad.c
cont.c:
  invoke void @bar()
          to label %cont.d unwind label %lpad.d
cont.d:
  ret void
lpad.a:
- %a = landingpad { i8*, i32 }
- catch i32* null
- catch i32* @T1
+ %a = landingpad { ptr, i32 }
+ catch ptr null
+ catch ptr @T1
unreachable
; CHECK: %a = landingpad
; CHECK-NEXT: null
; CHECK-NEXT: unreachable
lpad.b:
- %b = landingpad { i8*, i32 }
- filter [1 x i32*] zeroinitializer
+ %b = landingpad { ptr, i32 }
+ filter [1 x ptr] zeroinitializer
unreachable
; CHECK: %b = landingpad
; CHECK-NEXT: cleanup
; CHECK-NEXT: unreachable
lpad.c:
- %c = landingpad { i8*, i32 }
- filter [2 x i32*] [i32* @T1, i32* null]
+ %c = landingpad { ptr, i32 }
+ filter [2 x ptr] [ptr @T1, ptr null]
unreachable
; CHECK: %c = landingpad
; CHECK-NEXT: cleanup
; CHECK-NEXT: unreachable
lpad.d:
- %d = landingpad { i8*, i32 }
+ %d = landingpad { ptr, i32 }
cleanup
- catch i32* null
+ catch ptr null
unreachable
; CHECK: %d = landingpad
; CHECK-NEXT: null
; CHECK-NEXT: unreachable
}
-define void @foo_objc() personality i32 (i32, i64, i8*, i8*)* @__objc_personality_v0 {
+define void @foo_objc() personality ptr @__objc_personality_v0 {
; CHECK-LABEL: @foo_objc(
invoke void @bar()
to label %cont.a unwind label %lpad.a
cont.a:
  invoke void @bar()
          to label %cont.b unwind label %lpad.b
cont.b:
  invoke void @bar()
          to label %cont.c unwind label %lpad.c
cont.c:
  invoke void @bar()
          to label %cont.d unwind label %lpad.d
cont.d:
  ret void
lpad.a:
- %a = landingpad { i8*, i32 }
- catch i32* null
- catch i32* @T1
+ %a = landingpad { ptr, i32 }
+ catch ptr null
+ catch ptr @T1
unreachable
; CHECK: %a = landingpad
; CHECK-NEXT: null
; CHECK-NEXT: unreachable
lpad.b:
- %b = landingpad { i8*, i32 }
- filter [1 x i32*] zeroinitializer
+ %b = landingpad { ptr, i32 }
+ filter [1 x ptr] zeroinitializer
unreachable
; CHECK: %b = landingpad
; CHECK-NEXT: cleanup
; CHECK-NEXT: unreachable
lpad.c:
- %c = landingpad { i8*, i32 }
- filter [2 x i32*] [i32* @T1, i32* null]
+ %c = landingpad { ptr, i32 }
+ filter [2 x ptr] [ptr @T1, ptr null]
unreachable
; CHECK: %c = landingpad
; CHECK-NEXT: cleanup
; CHECK-NEXT: unreachable
lpad.d:
- %d = landingpad { i8*, i32 }
+ %d = landingpad { ptr, i32 }
cleanup
- catch i32* null
+ catch ptr null
unreachable
; CHECK: %d = landingpad
; CHECK-NEXT: null
; CHECK-NEXT: unreachable
}
-define void @foo_seh() personality i32 (...)* @__C_specific_handler {
+define void @foo_seh() personality ptr @__C_specific_handler {
; CHECK-LABEL: @foo_seh(
invoke void @bar()
to label %cont.a unwind label %lpad.a
cont.a:
  invoke void @bar()
          to label %cont.b unwind label %lpad.b
cont.b:
  invoke void @bar()
          to label %cont.c unwind label %lpad.c
cont.c:
  invoke void @bar()
          to label %cont.d unwind label %lpad.d
cont.d:
  ret void
lpad.a:
- %a = landingpad { i8*, i32 }
- catch i32* null
- catch i32* @T1
+ %a = landingpad { ptr, i32 }
+ catch ptr null
+ catch ptr @T1
unreachable
; CHECK: %a = landingpad
; CHECK-NEXT: null
; CHECK-NEXT: unreachable
lpad.b:
- %b = landingpad { i8*, i32 }
- filter [1 x i32*] zeroinitializer
+ %b = landingpad { ptr, i32 }
+ filter [1 x ptr] zeroinitializer
unreachable
; CHECK: %b = landingpad
; CHECK-NEXT: cleanup
; CHECK-NEXT: unreachable
lpad.c:
- %c = landingpad { i8*, i32 }
- filter [2 x i32*] [i32* @T1, i32* null]
+ %c = landingpad { ptr, i32 }
+ filter [2 x ptr] [ptr @T1, ptr null]
unreachable
; CHECK: %c = landingpad
; CHECK-NEXT: cleanup
; CHECK-NEXT: unreachable
lpad.d:
- %d = landingpad { i8*, i32 }
+ %d = landingpad { ptr, i32 }
cleanup
- catch i32* null
+ catch ptr null
unreachable
; CHECK: %d = landingpad
; CHECK-NEXT: null
; CHECK-NEXT: unreachable
}
; Check that we can find and remove redundant insertvalues
; CHECK-LABEL: foo_simple
-; CHECK-NOT: i8* %x, 0
-define { i8*, i64, i32 } @foo_simple(i8* %x, i8* %y) nounwind {
+; CHECK-NOT: ptr %x, 0
+define { ptr, i64, i32 } @foo_simple(ptr %x, ptr %y) nounwind {
entry:
- %0 = insertvalue { i8*, i64, i32 } undef, i8* %x, 0
- %1 = insertvalue { i8*, i64, i32 } %0, i8* %y, 0
- ret { i8*, i64, i32 } %1
+ %0 = insertvalue { ptr, i64, i32 } undef, ptr %x, 0
+ %1 = insertvalue { ptr, i64, i32 } %0, ptr %y, 0
+ ret { ptr, i64, i32 } %1
}
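; Field 0 written by %0 is overwritten by %1 before anything reads it, so the
; first insertvalue is dead and %y can be inserted straight into undef.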
; Check that we can find and remove redundant nodes in insertvalues chain
; CHECK-LABEL: foo_ovwrt_chain
; CHECK-NOT: i64 %y, 1
; CHECK-NOT: i32 555, 2
-define { i8*, i64, i32 } @foo_ovwrt_chain(i8* %x, i64 %y, i64 %z) nounwind {
+define { ptr, i64, i32 } @foo_ovwrt_chain(ptr %x, i64 %y, i64 %z) nounwind {
entry:
- %0 = insertvalue { i8*, i64, i32 } undef, i8* %x, 0
- %1 = insertvalue { i8*, i64, i32 } %0, i64 %y, 1
- %2 = insertvalue { i8*, i64, i32 } %1, i32 555, 2
- %3 = insertvalue { i8*, i64, i32 } %2, i64 %z, 1
- %4 = insertvalue { i8*, i64, i32 } %3, i32 777, 2
- ret { i8*, i64, i32 } %4
+ %0 = insertvalue { ptr, i64, i32 } undef, ptr %x, 0
+ %1 = insertvalue { ptr, i64, i32 } %0, i64 %y, 1
+ %2 = insertvalue { ptr, i64, i32 } %1, i32 555, 2
+ %3 = insertvalue { ptr, i64, i32 } %2, i64 %z, 1
+ %4 = insertvalue { ptr, i64, i32 } %3, i32 777, 2
+ ret { ptr, i64, i32 } %4
}
; Check that we propagate insertvalues only if they are used as the first
; operand (as initial value of aggregate)
target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: readonly uwtable
-define i1 @dot_ref_s(i32** noalias nocapture readonly dereferenceable(8)) {
+define i1 @dot_ref_s(ptr noalias nocapture readonly dereferenceable(8)) {
; CHECK-LABEL: @dot_ref_s(
; CHECK-NEXT: entry-block:
; CHECK-NEXT: ret i1 false
;
entry-block:
- %loadedptr = load i32*, i32** %0, align 8, !nonnull !0
- %ptrtoint = ptrtoint i32* %loadedptr to i64
- %inttoptr = inttoptr i64 %ptrtoint to i32*
- %switchtmp = icmp eq i32* %inttoptr, null
+ %loadedptr = load ptr, ptr %0, align 8, !nonnull !0
+ %ptrtoint = ptrtoint ptr %loadedptr to i64
+ %inttoptr = inttoptr i64 %ptrtoint to ptr
+ %switchtmp = icmp eq ptr %inttoptr, null
ret i1 %switchtmp
}
; Function Attrs: readonly uwtable
-define i64* @function(i64* noalias nocapture readonly dereferenceable(8)) {
+define ptr @function(ptr noalias nocapture readonly dereferenceable(8)) {
; CHECK-LABEL: @function(
; CHECK-NEXT: entry-block:
-; CHECK-NEXT: [[LOADED:%.*]] = load i64, i64* [[TMP0:%.*]], align 8, !range [[RNG0:![0-9]+]]
-; CHECK-NEXT: [[INTTOPTR:%.*]] = inttoptr i64 [[LOADED]] to i64*
-; CHECK-NEXT: ret i64* [[INTTOPTR]]
+; CHECK-NEXT: [[LOADED:%.*]] = load i64, ptr [[TMP0:%.*]], align 8, !range [[RNG0:![0-9]+]]
+; CHECK-NEXT: [[INTTOPTR:%.*]] = inttoptr i64 [[LOADED]] to ptr
+; CHECK-NEXT: ret ptr [[INTTOPTR]]
;
entry-block:
- %loaded = load i64, i64* %0, align 8, !range !1
- %inttoptr = inttoptr i64 %loaded to i64*
- ret i64* %inttoptr
+ %loaded = load i64, ptr %0, align 8, !range !1
+ %inttoptr = inttoptr i64 %loaded to ptr
+ ret ptr %inttoptr
}
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
-define void @PR37526(i32* %pz, i32* %px, i32* %py) {
+define void @PR37526(ptr %pz, ptr %px, ptr %py) {
; CHECK-LABEL: @PR37526(
-; CHECK-NEXT: [[T1:%.*]] = bitcast i32* [[PZ:%.*]] to i64*
-; CHECK-NEXT: [[T2:%.*]] = load i32, i32* [[PY:%.*]], align 4
-; CHECK-NEXT: [[T3:%.*]] = load i32, i32* [[PX:%.*]], align 4
+; CHECK-NEXT: [[T2:%.*]] = load i32, ptr [[PY:%.*]], align 4
+; CHECK-NEXT: [[T3:%.*]] = load i32, ptr [[PX:%.*]], align 4
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[T2]], [[T3]]
-; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32* [[PX]], i32* [[PY]]
-; CHECK-NEXT: [[BC:%.*]] = bitcast i32* [[SELECT]] to i64*
-; CHECK-NEXT: [[R:%.*]] = load i64, i64* [[BC]], align 4
-; CHECK-NEXT: store i64 [[R]], i64* [[T1]], align 4
+; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], ptr [[PX]], ptr [[PY]]
+; CHECK-NEXT: [[R:%.*]] = load i64, ptr [[SELECT]], align 4
+; CHECK-NEXT: store i64 [[R]], ptr [[PZ:%.*]], align 4
; CHECK-NEXT: ret void
;
- %t1 = bitcast i32* %pz to i64*
- %t2 = load i32, i32* %py
- %t3 = load i32, i32* %px
+ %t2 = load i32, ptr %py
+ %t3 = load i32, ptr %px
%cmp = icmp slt i32 %t2, %t3
- %select = select i1 %cmp, i32* %px, i32* %py
- %bc = bitcast i32* %select to i64*
- %r = load i64, i64* %bc
- store i64 %r, i64* %t1
+ %select = select i1 %cmp, ptr %px, ptr %py
+ %r = load i64, ptr %select
+ store i64 %r, ptr %pz
ret void
}
@g = external global i64
define i64 @infinite_loop_constant_expression_abs(i64 %arg) {
; CHECK-LABEL: @infinite_loop_constant_expression_abs(
-; CHECK-NEXT: [[T:%.*]] = sub i64 ptrtoint (i64* @g to i64), [[ARG:%.*]]
+; CHECK-NEXT: [[T:%.*]] = sub i64 ptrtoint (ptr @g to i64), [[ARG:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.abs.i64(i64 [[T]], i1 true)
; CHECK-NEXT: ret i64 [[TMP1]]
;
- %t = sub i64 ptrtoint (i64* @g to i64), %arg
+ %t = sub i64 ptrtoint (ptr @g to i64), %arg
%t1 = icmp slt i64 %t, 0
%t2 = sub nsw i64 0, %t
%t3 = select i1 %t1, i64 %t2, i64 %t
  ret i64 %t3
}
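; The icmp/sub-nsw/select idiom above is the canonical absolute-value pattern;
; the `i1 true` on @llvm.abs.i64 (INT_MIN becomes poison) is justified by the
; nsw on the negation.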
-define i32 @abs_sext_extra_use(i8 %x, i32* %p) {
+define i32 @abs_sext_extra_use(i8 %x, ptr %p) {
; CHECK-LABEL: @abs_sext_extra_use(
; CHECK-NEXT: [[S:%.*]] = sext i8 [[X:%.*]] to i32
-; CHECK-NEXT: store i32 [[S]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[S]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.abs.i32(i32 [[S]], i1 false)
; CHECK-NEXT: ret i32 [[A]]
;
%s = sext i8 %x to i32
- store i32 %s, i32* %p
+ store i32 %s, ptr %p
%a = call i32 @llvm.abs.i32(i32 %s, i1 0)
ret i32 %a
}
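; For the vector test below: x srem 2 is -1, 0, or 1 with the sign of x, so its
; absolute value is exactly the low bit, and the abs call folds to `and x, 1`.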
-define <3 x i82> @srem_by_2(<3 x i82> %x, <3 x i82>* %p) {
+define <3 x i82> @srem_by_2(<3 x i82> %x, ptr %p) {
; CHECK-LABEL: @srem_by_2(
; CHECK-NEXT: [[S:%.*]] = srem <3 x i82> [[X:%.*]], <i82 2, i82 2, i82 2>
-; CHECK-NEXT: store <3 x i82> [[S]], <3 x i82>* [[P:%.*]], align 32
+; CHECK-NEXT: store <3 x i82> [[S]], ptr [[P:%.*]], align 32
; CHECK-NEXT: [[R:%.*]] = and <3 x i82> [[X]], <i82 1, i82 1, i82 1>
; CHECK-NEXT: ret <3 x i82> [[R]]
;
%s = srem <3 x i82> %x, <i82 2, i82 2, i82 2>
- store <3 x i82> %s, <3 x i82>* %p
+ store <3 x i82> %s, ptr %p
%r = call <3 x i82> @llvm.abs.v3i82(<3 x i82> %s, i1 false)
ret <3 x i82> %r
}
declare i32 @llvm.abs.i32(i32, i1)
declare <3 x i82> @llvm.abs.v3i82(<3 x i82>, i1)
; Negative test - extra use of the sext means increase of instructions.
-define i32 @add_nsw_sext_add_extra_use_1(i8 %x, i32* %p) {
+define i32 @add_nsw_sext_add_extra_use_1(i8 %x, ptr %p) {
; CHECK-LABEL: @add_nsw_sext_add_extra_use_1(
; CHECK-NEXT: [[ADD:%.*]] = add nsw i8 [[X:%.*]], 42
; CHECK-NEXT: [[EXT:%.*]] = sext i8 [[ADD]] to i32
-; CHECK-NEXT: store i32 [[EXT]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[EXT]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = add nsw i32 [[EXT]], 356
; CHECK-NEXT: ret i32 [[R]]
;
%add = add nsw i8 %x, 42
%ext = sext i8 %add to i32
- store i32 %ext, i32* %p
+ store i32 %ext, ptr %p
%r = add i32 %ext, 356
ret i32 %r
}
-define <2 x i32> @add_nsw_sext_add_vec_extra_use_2(<2 x i8> %x, <2 x i8>* %p) {
+define <2 x i32> @add_nsw_sext_add_vec_extra_use_2(<2 x i8> %x, ptr %p) {
; CHECK-LABEL: @add_nsw_sext_add_vec_extra_use_2(
; CHECK-NEXT: [[ADD:%.*]] = add nsw <2 x i8> [[X:%.*]], <i8 42, i8 -5>
-; CHECK-NEXT: store <2 x i8> [[ADD]], <2 x i8>* [[P:%.*]], align 2
+; CHECK-NEXT: store <2 x i8> [[ADD]], ptr [[P:%.*]], align 2
; CHECK-NEXT: [[TMP1:%.*]] = sext <2 x i8> [[X]] to <2 x i32>
; CHECK-NEXT: [[R:%.*]] = add nsw <2 x i32> [[TMP1]], <i32 398, i32 7>
; CHECK-NEXT: ret <2 x i32> [[R]]
;
%add = add nsw <2 x i8> %x, <i8 42, i8 -5>
- store <2 x i8> %add, <2 x i8>* %p
+ store <2 x i8> %add, ptr %p
%ext = sext <2 x i8> %add to <2 x i32>
%r = add <2 x i32> %ext, <i32 356, i32 12>
  ret <2 x i32> %r
}
; Negative test - extra use of the zext means increase of instructions.
-define i64 @add_nuw_zext_add_extra_use_1(i8 %x, i64* %p) {
+define i64 @add_nuw_zext_add_extra_use_1(i8 %x, ptr %p) {
; CHECK-LABEL: @add_nuw_zext_add_extra_use_1(
; CHECK-NEXT: [[ADD:%.*]] = add nuw i8 [[X:%.*]], 42
; CHECK-NEXT: [[EXT:%.*]] = zext i8 [[ADD]] to i64
-; CHECK-NEXT: store i64 [[EXT]], i64* [[P:%.*]], align 4
+; CHECK-NEXT: store i64 [[EXT]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = add nuw nsw i64 [[EXT]], 356
; CHECK-NEXT: ret i64 [[R]]
;
%add = add nuw i8 %x, 42
%ext = zext i8 %add to i64
- store i64 %ext, i64* %p
+ store i64 %ext, ptr %p
%r = add i64 %ext, 356
ret i64 %r
}
-define i64 @add_nuw_zext_add_extra_use_2(i8 %x, i8* %p) {
+define i64 @add_nuw_zext_add_extra_use_2(i8 %x, ptr %p) {
; CHECK-LABEL: @add_nuw_zext_add_extra_use_2(
; CHECK-NEXT: [[ADD:%.*]] = add nuw i8 [[X:%.*]], 42
-; CHECK-NEXT: store i8 [[ADD]], i8* [[P:%.*]], align 1
+; CHECK-NEXT: store i8 [[ADD]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[X]] to i64
; CHECK-NEXT: [[R:%.*]] = add nuw nsw i64 [[TMP1]], -314
; CHECK-NEXT: ret i64 [[R]]
;
%add = add nuw i8 %x, 42
- store i8 %add, i8* %p
+ store i8 %add, ptr %p
%ext = zext i8 %add to i64
%r = add i64 %ext, -356
  ret i64 %r
}
; Negative test - extra use
-define i32 @lshr_add_use(i1 %x, i1 %y, i32* %p) {
+define i32 @lshr_add_use(i1 %x, i1 %y, ptr %p) {
; CHECK-LABEL: @lshr_add_use(
; CHECK-NEXT: [[XZ:%.*]] = zext i1 [[X:%.*]] to i32
-; CHECK-NEXT: store i32 [[XZ]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[XZ]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[YS:%.*]] = sext i1 [[Y:%.*]] to i32
; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[XZ]], [[YS]]
; CHECK-NEXT: [[R:%.*]] = lshr i32 [[SUB]], 31
; CHECK-NEXT: ret i32 [[R]]
;
%xz = zext i1 %x to i32
- store i32 %xz, i32* %p
+ store i32 %xz, ptr %p
%ys = sext i1 %y to i32
%sub = add i32 %xz, %ys
  %r = lshr i32 %sub, 31
  ret i32 %r
}
; Negative test - extra use
-define i32 @lshr_add_use2(i1 %x, i1 %y, i32* %p) {
+define i32 @lshr_add_use2(i1 %x, i1 %y, ptr %p) {
; CHECK-LABEL: @lshr_add_use2(
; CHECK-NEXT: [[XZ:%.*]] = zext i1 [[X:%.*]] to i32
; CHECK-NEXT: [[YS:%.*]] = sext i1 [[Y:%.*]] to i32
-; CHECK-NEXT: store i32 [[YS]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[YS]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[XZ]], [[YS]]
; CHECK-NEXT: [[R:%.*]] = lshr i32 [[SUB]], 31
; CHECK-NEXT: ret i32 [[R]]
;
%xz = zext i1 %x to i32
%ys = sext i1 %y to i32
- store i32 %ys, i32* %p
+ store i32 %ys, ptr %p
%sub = add i32 %xz, %ys
%r = lshr i32 %sub, 31
  ret i32 %r
}
; Negative test - extra use
-define i32 @lshr_add_use_sexts(i1 %x, i1 %y, i32* %p) {
+define i32 @lshr_add_use_sexts(i1 %x, i1 %y, ptr %p) {
; CHECK-LABEL: @lshr_add_use_sexts(
; CHECK-NEXT: [[XS:%.*]] = sext i1 [[X:%.*]] to i32
-; CHECK-NEXT: store i32 [[XS]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[XS]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[YS:%.*]] = sext i1 [[Y:%.*]] to i32
; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[XS]], [[YS]]
; CHECK-NEXT: [[R:%.*]] = lshr i32 [[SUB]], 31
; CHECK-NEXT: ret i32 [[R]]
;
%xs = sext i1 %x to i32
- store i32 %xs, i32* %p
+ store i32 %xs, ptr %p
%ys = sext i1 %y to i32
%sub = add i32 %xs, %ys
  %r = lshr i32 %sub, 31
  ret i32 %r
}
; Negative test - extra use
-define i32 @lshr_add_use2_sexts(i1 %x, i1 %y, i32* %p) {
+define i32 @lshr_add_use2_sexts(i1 %x, i1 %y, ptr %p) {
; CHECK-LABEL: @lshr_add_use2_sexts(
; CHECK-NEXT: [[XS:%.*]] = sext i1 [[X:%.*]] to i32
; CHECK-NEXT: [[YS:%.*]] = sext i1 [[Y:%.*]] to i32
-; CHECK-NEXT: store i32 [[YS]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[YS]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[XS]], [[YS]]
; CHECK-NEXT: [[R:%.*]] = lshr i32 [[SUB]], 31
; CHECK-NEXT: ret i32 [[R]]
;
%xs = sext i1 %x to i32
%ys = sext i1 %y to i32
- store i32 %ys, i32* %p
+ store i32 %ys, ptr %p
%sub = add i32 %xs, %ys
%r = lshr i32 %sub, 31
  ret i32 %r
}
declare i32 @callee(i32)
define void @test2(i32 %.val24) {
EntryBlock:
add i32 %.val24, -12
- inttoptr i32 %0 to i32*
- store i32 1, i32* %1
+ inttoptr i32 %0 to ptr
+ store i32 1, ptr %1
add i32 %.val24, -16
- inttoptr i32 %2 to i32*
- getelementptr i32, i32* %3, i32 1
- load i32, i32* %4
+ inttoptr i32 %2 to ptr
+ getelementptr i32, ptr %3, i32 1
+ load i32, ptr %4
tail call i32 @callee( i32 %5 )
ret void
}
; CHECK-NEXT: br label [[BB1:%.*]]
; CHECK: bb1:
; CHECK-NEXT: [[J:%.*]] = phi i64 [ 0, [[BB7_OUTER]] ], [ [[INDVAR_NEXT:%.*]], [[BB1]] ]
-; CHECK-NEXT: [[T4:%.*]] = getelementptr [1001 x [20000 x double]], [1001 x [20000 x double]]* @Nice, i64 0, i64 [[I]], i64 [[J]]
-; CHECK-NEXT: [[Q:%.*]] = bitcast double* [[T4]] to <2 x double>*
-; CHECK-NEXT: store <2 x double> zeroinitializer, <2 x double>* [[Q]], align 16
-; CHECK-NEXT: [[S4:%.*]] = getelementptr [1001 x [20001 x double]], [1001 x [20001 x double]]* @Awkward, i64 0, i64 [[I]], i64 [[J]]
-; CHECK-NEXT: [[R:%.*]] = bitcast double* [[S4]] to <2 x double>*
-; CHECK-NEXT: store <2 x double> zeroinitializer, <2 x double>* [[R]], align 8
+; CHECK-NEXT: [[T4:%.*]] = getelementptr [1001 x [20000 x double]], ptr @Nice, i64 0, i64 [[I]], i64 [[J]]
+; CHECK-NEXT: store <2 x double> zeroinitializer, ptr [[T4]], align 16
+; CHECK-NEXT: [[S4:%.*]] = getelementptr [1001 x [20001 x double]], ptr @Awkward, i64 0, i64 [[I]], i64 [[J]]
+; CHECK-NEXT: store <2 x double> zeroinitializer, ptr [[S4]], align 8
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[J]], 2
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 556
; CHECK-NEXT: br i1 [[EXITCOND]], label [[BB11]], label [[BB1]]
bb1:
%j = phi i64 [ 0, %bb7.outer ], [ %indvar.next, %bb1 ]
- %t4 = getelementptr [1001 x [20000 x double]], [1001 x [20000 x double]]* @Nice, i64 0, i64 %i, i64 %j
- %q = bitcast double* %t4 to <2 x double>*
- store <2 x double><double 0.0, double 0.0>, <2 x double>* %q, align 8
+ %t4 = getelementptr [1001 x [20000 x double]], ptr @Nice, i64 0, i64 %i, i64 %j
+ store <2 x double><double 0.0, double 0.0>, ptr %t4, align 8
- %s4 = getelementptr [1001 x [20001 x double]], [1001 x [20001 x double]]* @Awkward, i64 0, i64 %i, i64 %j
- %r = bitcast double* %s4 to <2 x double>*
- store <2 x double><double 0.0, double 0.0>, <2 x double>* %r, align 8
+ %s4 = getelementptr [1001 x [20001 x double]], ptr @Awkward, i64 0, i64 %i, i64 %j
+ store <2 x double><double 0.0, double 0.0>, ptr %s4, align 8
%indvar.next = add i64 %j, 2
%exitcond = icmp eq i64 %indvar.next, 556
; Instcombine should be able to prove vector alignment in the
; presence of a few mild address computation tricks.
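; Concretely: masking with -16 makes %e 16-byte aligned, and the index
; %h = %i * (2 * %u) + (%y & -2) is always even, so the double-typed gep stays
; on a 16-byte boundary and the store's alignment can be raised to 16.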
-define void @test0(i8* %b, i64 %n, i64 %u, i64 %y) nounwind {
+define void @test0(ptr %b, i64 %n, i64 %u, i64 %y) nounwind {
; CHECK-LABEL: @test0(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[C:%.*]] = ptrtoint i8* [[B:%.*]] to i64
+; CHECK-NEXT: [[C:%.*]] = ptrtoint ptr [[B:%.*]] to i64
; CHECK-NEXT: [[D:%.*]] = and i64 [[C]], -16
-; CHECK-NEXT: [[E:%.*]] = inttoptr i64 [[D]] to double*
+; CHECK-NEXT: [[E:%.*]] = inttoptr i64 [[D]] to ptr
; CHECK-NEXT: [[V:%.*]] = shl i64 [[U:%.*]], 1
; CHECK-NEXT: [[Z:%.*]] = and i64 [[Y:%.*]], -2
; CHECK-NEXT: [[T1421:%.*]] = icmp eq i64 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[T1421]], label [[RETURN:%.*]], label [[BB:%.*]]
; CHECK: bb:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[INDVAR_NEXT:%.*]], [[BB]] ], [ 20, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[J:%.*]] = mul i64 [[I]], [[V]]
; CHECK-NEXT: [[H:%.*]] = add i64 [[J]], [[Z]]
-; CHECK-NEXT: [[T8:%.*]] = getelementptr double, double* [[E]], i64 [[H]]
-; CHECK-NEXT: [[P:%.*]] = bitcast double* [[T8]] to <2 x double>*
-; CHECK-NEXT: store <2 x double> zeroinitializer, <2 x double>* [[P]], align 16
+; CHECK-NEXT: [[T8:%.*]] = getelementptr double, ptr [[E]], i64 [[H]]
+; CHECK-NEXT: store <2 x double> zeroinitializer, ptr [[T8]], align 16
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[I]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
; CHECK-NEXT: ret void
;
entry:
- %c = ptrtoint i8* %b to i64
+ %c = ptrtoint ptr %b to i64
%d = and i64 %c, -16
- %e = inttoptr i64 %d to double*
+ %e = inttoptr i64 %d to ptr
%v = mul i64 %u, 2
%z = and i64 %y, -2
  %t1421 = icmp eq i64 %n, 0
  br i1 %t1421, label %return, label %bb

bb:
%i = phi i64 [ %indvar.next, %bb ], [ 20, %entry ]
%j = mul i64 %i, %v
%h = add i64 %j, %z
- %t8 = getelementptr double, double* %e, i64 %h
- %p = bitcast double* %t8 to <2 x double>*
- store <2 x double><double 0.0, double 0.0>, <2 x double>* %p, align 8
+ %t8 = getelementptr double, ptr %e, i64 %h
+ store <2 x double><double 0.0, double 0.0>, ptr %t8, align 8
%indvar.next = add i64 %i, 1
%exitcond = icmp eq i64 %indvar.next, %n
  br i1 %exitcond, label %return, label %bb

return:
  ret void
}
define <16 x i8> @test1(<2 x i64> %x) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([4 x i32]* @GLOBAL to <16 x i8>*), align 16
+; CHECK-NEXT: [[TMP:%.*]] = load <16 x i8>, ptr @GLOBAL, align 16
; CHECK-NEXT: ret <16 x i8> [[TMP]]
;
entry:
- %tmp = load <16 x i8>, <16 x i8>* bitcast ([4 x i32]* @GLOBAL to <16 x i8>*), align 1
+ %tmp = load <16 x i8>, ptr @GLOBAL, align 1
ret <16 x i8> %tmp
}
define <16 x i8> @test1_as1(<2 x i64> %x) {
; CHECK-LABEL: @test1_as1(
-; CHECK-NEXT: [[TMP:%.*]] = load <16 x i8>, <16 x i8> addrspace(1)* bitcast ([4 x i32] addrspace(1)* @GLOBAL_as1 to <16 x i8> addrspace(1)*), align 16
+; CHECK-NEXT: [[TMP:%.*]] = load <16 x i8>, ptr addrspace(1) @GLOBAL_as1, align 16
; CHECK-NEXT: ret <16 x i8> [[TMP]]
;
- %tmp = load <16 x i8>, <16 x i8> addrspace(1)* bitcast ([4 x i32] addrspace(1)* @GLOBAL_as1 to <16 x i8> addrspace(1)*), align 1
+ %tmp = load <16 x i8>, ptr addrspace(1) @GLOBAL_as1, align 1
ret <16 x i8> %tmp
}
define <16 x i8> @test1_as1_gep(<2 x i64> %x) {
; CHECK-LABEL: @test1_as1_gep(
-; CHECK-NEXT: [[TMP:%.*]] = load <16 x i8>, <16 x i8> addrspace(1)* bitcast (i32 addrspace(1)* getelementptr inbounds ([8 x i32], [8 x i32] addrspace(1)* @GLOBAL_as1_gep, i32 0, i32 4) to <16 x i8> addrspace(1)*), align 16
+; CHECK-NEXT: [[TMP:%.*]] = load <16 x i8>, ptr addrspace(1) getelementptr inbounds ([8 x i32], ptr addrspace(1) @GLOBAL_as1_gep, i32 0, i32 4), align 16
; CHECK-NEXT: ret <16 x i8> [[TMP]]
;
- %tmp = load <16 x i8>, <16 x i8> addrspace(1)* bitcast (i32 addrspace(1)* getelementptr ([8 x i32], [8 x i32] addrspace(1)* @GLOBAL_as1_gep, i16 0, i16 4) to <16 x i8> addrspace(1)*), align 1
+ %tmp = load <16 x i8>, ptr addrspace(1) getelementptr ([8 x i32], ptr addrspace(1) @GLOBAL_as1_gep, i16 0, i16 4), align 1
ret <16 x i8> %tmp
}
; When a load or store lacks an explicit alignment, add one.
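; The inferred value is the type's ABI alignment from the datalayout; for the
; double load/store below that is presumably 8, matching the CHECK lines.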
-define double @test2(double* %p, double %n) nounwind {
+define double @test2(ptr %p, double %n) nounwind {
; CHECK-LABEL: @test2(
-; CHECK-NEXT: [[T:%.*]] = load double, double* [[P:%.*]], align 8
-; CHECK-NEXT: store double [[N:%.*]], double* [[P]], align 8
+; CHECK-NEXT: [[T:%.*]] = load double, ptr [[P:%.*]], align 8
+; CHECK-NEXT: store double [[N:%.*]], ptr [[P]], align 8
; CHECK-NEXT: ret double [[T]]
;
- %t = load double, double* %p
- store double %n, double* %p
+ %t = load double, ptr %p
+ store double %n, ptr %p
ret double %t
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
-declare void @use(i8*)
+declare void @use(ptr)
%struct.s = type { i32, i32, i32, i32 }
-define void @test3(%struct.s* sret(%struct.s) %a4) {
+define void @test3(ptr sret(%struct.s) %a4) {
; Check that the alignment is bumped up to the alignment of the sret type.
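; %struct.s is four i32s: ABI alignment 4 and size 16, which is where the
; `align 4 dereferenceable(16)` on the memset comes from.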
; CHECK-LABEL: @test3(
-; CHECK-NEXT: [[A4_CAST:%.*]] = bitcast %struct.s* [[A4:%.*]] to i8*
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(16) [[A4_CAST]], i8 0, i64 16, i1 false)
-; CHECK-NEXT: call void @use(i8* [[A4_CAST]])
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 4 dereferenceable(16) [[A4:%.*]], i8 0, i64 16, i1 false)
+; CHECK-NEXT: call void @use(ptr [[A4]])
; CHECK-NEXT: ret void
;
- %a4.cast = bitcast %struct.s* %a4 to i8*
- call void @llvm.memset.p0i8.i64(i8* %a4.cast, i8 0, i64 16, i1 false)
- call void @use(i8* %a4.cast)
+ call void @llvm.memset.p0.i64(ptr %a4, i8 0, i64 16, i1 false)
+ call void @use(ptr %a4)
ret void
}
target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind uwtable
-define i32 @foo1(i32* align 32 %a) #0 {
+define i32 @foo1(ptr align 32 %a) #0 {
; CHECK-LABEL: @foo1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A:%.*]], align 32
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A:%.*]], align 32
; CHECK-NEXT: ret i32 [[TMP0]]
;
entry:
- %0 = load i32, i32* %a, align 4
+ %0 = load i32, ptr %a, align 4
ret i32 %0
}
-define i32 @foo2(i32* align 32 %a) #0 {
+define i32 @foo2(ptr align 32 %a) #0 {
; CHECK-LABEL: @foo2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[V:%.*]] = call i32* @func1(i32* [[A:%.*]])
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 32
+; CHECK-NEXT: [[V:%.*]] = call ptr @func1(ptr [[A:%.*]])
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 32
; CHECK-NEXT: ret i32 [[TMP0]]
;
entry:
- %v = call i32* @func1(i32* %a)
- %0 = load i32, i32* %v, align 4
+ %v = call ptr @func1(ptr %a)
+ %0 = load i32, ptr %v, align 4
ret i32 %0
}
-declare i32* @func1(i32* returned) nounwind
+declare ptr @func1(ptr returned) nounwind
; CHECK: @C = available_externally global <4 x i32> zeroinitializer, align 4
define i64 @foo(i64 %a) {
- %t = ptrtoint i32* @A to i64
+ %t = ptrtoint ptr @A to i64
%s = shl i64 %a, 3
%r = or i64 %t, %s
  %q = add i64 %r, 1
  ret i64 %q
}
; CHECK-LABEL: define i64 @foo(i64 %a)
; CHECK: %s = shl i64 %a, 3
-; CHECK: %r = or i64 %s, ptrtoint (i32* @A to i64)
+; CHECK: %r = or i64 %s, ptrtoint (ptr @A to i64)
; CHECK: %q = add i64 %r, 1
; CHECK: ret i64 %q
define i32 @bar() {
- %r = load i32, i32* @B, align 1
+ %r = load i32, ptr @B, align 1
ret i32 %r
}
; CHECK: align 1
define void @vec_store() {
- store <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32>* @C, align 4
+ store <4 x i32> <i32 0, i32 1, i32 2, i32 3>, ptr @C, align 4
ret void
}
; CHECK: define void @vec_store()
-; CHECK: store <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32>* @C, align 4
+; CHECK: store <4 x i32> <i32 0, i32 1, i32 2, i32 3>, ptr @C, align 4
target triple = "powerpc64-unknown-linux-gnu"
@d = global i32 15, align 4
-@b = global i32* @d, align 8
+@b = global ptr @d, align 8
@a = common global i32 0, align 4
; Check that both InstCombine and InstSimplify can use computeKnownBits to
; prove that the `or`ed-in bits below are zero.
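; (2072 is 0b100000011000; shifted right by 7 or 8 its low bit is 0, so %2 and
; %5 are known zero, each `or` is a no-op, and the stores of freshly loaded
; values disappear, leaving just the two loads in the CHECK lines.)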
define signext i32 @main() #1 {
; CHECK-LABEL: @main(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** @b, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr @b, align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: ret i32 [[TMP1]]
;
entry:
- %0 = load i32*, i32** @b, align 8
- %1 = load i32, i32* @a, align 4
+ %0 = load ptr, ptr @b, align 8
+ %1 = load i32, ptr @a, align 4
%lnot = icmp eq i32 %1, 0
%lnot.ext = zext i1 %lnot to i32
%shr.i = lshr i32 2072, %lnot.ext
%call.lobit = lshr i32 %shr.i, 7
%2 = and i32 %call.lobit, 1
- %3 = load i32, i32* %0, align 4
+ %3 = load i32, ptr %0, align 4
%or = or i32 %2, %3
- store i32 %or, i32* %0, align 4
- %4 = load i32, i32* @a, align 4
+ store i32 %or, ptr %0, align 4
+ %4 = load i32, ptr @a, align 4
%lnot.1 = icmp eq i32 %4, 0
%lnot.ext.1 = zext i1 %lnot.1 to i32
%shr.i.1 = lshr i32 2072, %lnot.ext.1
%call.lobit.1 = lshr i32 %shr.i.1, 7
%5 = and i32 %call.lobit.1, 1
%or.1 = or i32 %5, %or
- store i32 %or.1, i32* %0, align 4
+ store i32 %or.1, ptr %0, align 4
ret i32 %or.1
}
define void @test() {
; CHECK-LABEL: @test(
; CHECK-NEXT: ret void
;
- %1 = tail call noalias align 16 dereferenceable_or_null(4) i8* @malloc(i64 4) #4
- %2 = tail call align 16 dereferenceable_or_null(6) i8* @realloc(i8* %1, i64 6) #4
- tail call void @free(i8* %2) #4
+ %1 = tail call noalias align 16 dereferenceable_or_null(4) ptr @malloc(i64 4) #4
+ %2 = tail call align 16 dereferenceable_or_null(6) ptr @realloc(ptr %1, i64 6) #4
+ tail call void @free(ptr %2) #4
ret void
}
-declare dso_local noalias noundef i8* @malloc(i64 noundef) local_unnamed_addr #1
-declare dso_local noalias noundef i8* @realloc(i8* nocapture allocptr, i64 noundef) local_unnamed_addr #2
-declare dso_local void @free(i8* nocapture allocptr noundef) local_unnamed_addr #3
+declare dso_local noalias noundef ptr @malloc(i64 noundef) local_unnamed_addr #1
+declare dso_local noalias noundef ptr @realloc(ptr nocapture allocptr, i64 noundef) local_unnamed_addr #2
+declare dso_local void @free(ptr nocapture allocptr noundef) local_unnamed_addr #3
declare void @llvm.dbg.value(metadata, metadata, metadata) #3
attributes #0 = { mustprogress nounwind uwtable willreturn }
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
; OSS-Fuzz: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=5223
-define void @test_bigalloc(i8** %dst) {
+define void @test_bigalloc(ptr %dst) {
; CHECK-LABEL: @test_bigalloc(
; CHECK-NEXT: [[TMP1:%.*]] = alloca [18446744069414584320 x i8], align 1
-; CHECK-NEXT: [[DOTSUB:%.*]] = getelementptr inbounds [18446744069414584320 x i8], [18446744069414584320 x i8]* [[TMP1]], i64 0, i64 0
-; CHECK-NEXT: store i8* [[DOTSUB]], i8** [[DST:%.*]], align 8
+; CHECK-NEXT: store ptr [[TMP1]], ptr [[DST:%.*]], align 8
; CHECK-NEXT: ret void
;
%1 = alloca i8, i864 -4294967296
- %2 = getelementptr i8, i8* %1, i1 0
- store i8* %2, i8** %dst
+ store ptr %1, ptr %dst
ret void
}
target datalayout="e-p:32:32:32"
-declare i8* @my_malloc(i8*, i64) allocsize(1)
+declare ptr @my_malloc(ptr, i64) allocsize(1)
-define void @test_malloc(i8** %p, i32* %r) {
- %1 = call i8* @my_malloc(i8* null, i64 100)
- store i8* %1, i8** %p, align 8 ; To ensure objectsize isn't killed
+define void @test_malloc(ptr %p, ptr %r) {
+ %1 = call ptr @my_malloc(ptr null, i64 100)
+ store ptr %1, ptr %p, align 8 ; To ensure objectsize isn't killed
- %2 = call i32 @llvm.objectsize.i32.p0i8(i8* %1, i1 false)
+ %2 = call i32 @llvm.objectsize.i32.p0(ptr %1, i1 false)
; CHECK: store i32 100
- store i32 %2, i32* %r, align 8
+ store i32 %2, ptr %r, align 8
; Big number is 5 billion.
- %3 = call i8* @my_malloc(i8* null, i64 5000000000)
- store i8* %3, i8** %p, align 8 ; To ensure objectsize isn't killed
+ %3 = call ptr @my_malloc(ptr null, i64 5000000000)
+ store ptr %3, ptr %p, align 8 ; To ensure objectsize isn't killed
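; 5000000000 does not fit in 32 bits, so the i32 objectsize below cannot be
; folded to a constant and the intrinsic call is expected to remain.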
; CHECK: call i32 @llvm.objectsize
- %4 = call i32 @llvm.objectsize.i32.p0i8(i8* %3, i1 false)
- store i32 %4, i32* %r, align 8
+ %4 = call i32 @llvm.objectsize.i32.p0(ptr %3, i1 false)
+ store i32 %4, ptr %r, align 8
ret void
}
-declare i32 @llvm.objectsize.i32.p0i8(i8*, i1)
+declare i32 @llvm.objectsize.i32.p0(ptr, i1)
; These tests exercise both constant and non-constant args, and prove that
; arbitrary unfoldable values don't interfere with allocsize if they're not
; used by allocsize.
-declare i8* @my_malloc(i8*, i32) allocsize(1)
-declare i8* @my_calloc(i8*, i8*, i32, i32) allocsize(2, 3)
+declare ptr @my_malloc(ptr, i32) allocsize(1)
+declare ptr @my_calloc(ptr, ptr, i32, i32) allocsize(2, 3)
; CHECK-LABEL: define void @test_malloc
-define void @test_malloc(i8** %p, i64* %r) {
- %1 = call i8* @my_malloc(i8* null, i32 100)
- store i8* %1, i8** %p, align 8 ; To ensure objectsize isn't killed
+define void @test_malloc(ptr %p, ptr %r) {
+ %1 = call ptr @my_malloc(ptr null, i32 100)
+ store ptr %1, ptr %p, align 8 ; To ensure objectsize isn't killed
- %2 = call i64 @llvm.objectsize.i64.p0i8(i8* %1, i1 false)
+ %2 = call i64 @llvm.objectsize.i64.p0(ptr %1, i1 false)
; CHECK: store i64 100
- store i64 %2, i64* %r, align 8
+ store i64 %2, ptr %r, align 8
ret void
}
; CHECK-LABEL: define void @test_calloc
-define void @test_calloc(i8** %p, i64* %r) {
- %1 = call i8* @my_calloc(i8* null, i8* null, i32 100, i32 5)
- store i8* %1, i8** %p, align 8 ; To ensure objectsize isn't killed
+define void @test_calloc(ptr %p, ptr %r) {
+ %1 = call ptr @my_calloc(ptr null, ptr null, i32 100, i32 5)
+ store ptr %1, ptr %p, align 8 ; To ensure objectsize isn't killed
- %2 = call i64 @llvm.objectsize.i64.p0i8(i8* %1, i1 false)
+ %2 = call i64 @llvm.objectsize.i64.p0(ptr %1, i1 false)
; CHECK: store i64 500
- store i64 %2, i64* %r, align 8
+ store i64 %2, ptr %r, align 8
ret void
}
; Failure cases with non-constant values...
; CHECK-LABEL: define void @test_malloc_fails
-define void @test_malloc_fails(i8** %p, i64* %r, i32 %n) {
- %1 = call i8* @my_malloc(i8* null, i32 %n)
- store i8* %1, i8** %p, align 8 ; To ensure objectsize isn't killed
+define void @test_malloc_fails(ptr %p, ptr %r, i32 %n) {
+ %1 = call ptr @my_malloc(ptr null, i32 %n)
+ store ptr %1, ptr %p, align 8 ; To ensure objectsize isn't killed
- ; CHECK: @llvm.objectsize.i64.p0i8
- %2 = call i64 @llvm.objectsize.i64.p0i8(i8* %1, i1 false)
- store i64 %2, i64* %r, align 8
+ ; CHECK: @llvm.objectsize.i64.p0
+ %2 = call i64 @llvm.objectsize.i64.p0(ptr %1, i1 false)
+ store i64 %2, ptr %r, align 8
ret void
}
; CHECK-LABEL: define void @test_calloc_fails
-define void @test_calloc_fails(i8** %p, i64* %r, i32 %n) {
- %1 = call i8* @my_calloc(i8* null, i8* null, i32 %n, i32 5)
- store i8* %1, i8** %p, align 8 ; To ensure objectsize isn't killed
+define void @test_calloc_fails(ptr %p, ptr %r, i32 %n) {
+ %1 = call ptr @my_calloc(ptr null, ptr null, i32 %n, i32 5)
+ store ptr %1, ptr %p, align 8 ; To ensure objectsize isn't killed
- ; CHECK: @llvm.objectsize.i64.p0i8
- %2 = call i64 @llvm.objectsize.i64.p0i8(i8* %1, i1 false)
- store i64 %2, i64* %r, align 8
+ ; CHECK: @llvm.objectsize.i64.p0
+ %2 = call i64 @llvm.objectsize.i64.p0(ptr %1, i1 false)
+ store i64 %2, ptr %r, align 8
- %3 = call i8* @my_calloc(i8* null, i8* null, i32 100, i32 %n)
- store i8* %3, i8** %p, align 8 ; To ensure objectsize isn't killed
+ %3 = call ptr @my_calloc(ptr null, ptr null, i32 100, i32 %n)
+ store ptr %3, ptr %p, align 8 ; To ensure objectsize isn't killed
- ; CHECK: @llvm.objectsize.i64.p0i8
- %4 = call i64 @llvm.objectsize.i64.p0i8(i8* %3, i1 false)
- store i64 %4, i64* %r, align 8
+ ; CHECK: @llvm.objectsize.i64.p0
+ %4 = call i64 @llvm.objectsize.i64.p0(ptr %3, i1 false)
+ store i64 %4, ptr %r, align 8
ret void
}
-declare i8* @my_malloc_outofline(i8*, i32) #0
-declare i8* @my_calloc_outofline(i8*, i8*, i32, i32) #1
+declare ptr @my_malloc_outofline(ptr, i32) #0
+declare ptr @my_calloc_outofline(ptr, ptr, i32, i32) #1
; Verifying that out of line allocsize is parsed correctly
; CHECK-LABEL: define void @test_outofline
-define void @test_outofline(i8** %p, i64* %r) {
- %1 = call i8* @my_malloc_outofline(i8* null, i32 100)
- store i8* %1, i8** %p, align 8 ; To ensure objectsize isn't killed
+define void @test_outofline(ptr %p, ptr %r) {
+ %1 = call ptr @my_malloc_outofline(ptr null, i32 100)
+ store ptr %1, ptr %p, align 8 ; To ensure objectsize isn't killed
- %2 = call i64 @llvm.objectsize.i64.p0i8(i8* %1, i1 false)
+ %2 = call i64 @llvm.objectsize.i64.p0(ptr %1, i1 false)
; CHECK: store i64 100
- store i64 %2, i64* %r, align 8
+ store i64 %2, ptr %r, align 8
- %3 = call i8* @my_calloc_outofline(i8* null, i8* null, i32 100, i32 5)
- store i8* %3, i8** %p, align 8 ; To ensure objectsize isn't killed
+ %3 = call ptr @my_calloc_outofline(ptr null, ptr null, i32 100, i32 5)
+ store ptr %3, ptr %p, align 8 ; To ensure objectsize isn't killed
- %4 = call i64 @llvm.objectsize.i64.p0i8(i8* %3, i1 false)
+ %4 = call i64 @llvm.objectsize.i64.p0(ptr %3, i1 false)
; CHECK: store i64 500
- store i64 %4, i64* %r, align 8
+ store i64 %4, ptr %r, align 8
ret void
}
-declare i8* @my_malloc_i64(i8*, i64) #0
-declare i8* @my_tiny_calloc(i8*, i8*, i8, i8) #1
-declare i8* @my_varied_calloc(i8*, i8*, i32, i8) #1
+declare ptr @my_malloc_i64(ptr, i64) #0
+declare ptr @my_tiny_calloc(ptr, ptr, i8, i8) #1
+declare ptr @my_varied_calloc(ptr, ptr, i32, i8) #1
; CHECK-LABEL: define void @test_overflow
-define void @test_overflow(i8** %p, i32* %r) {
- %r64 = bitcast i32* %r to i64*
+define void @test_overflow(ptr %p, ptr %r) {
; (2**31 + 1) * 2 == 2**32 + 2, which does not fit in i32. So overflow. Yay.
- %big_malloc = call i8* @my_calloc(i8* null, i8* null, i32 2147483649, i32 2)
- store i8* %big_malloc, i8** %p, align 8
+ %big_malloc = call ptr @my_calloc(ptr null, ptr null, i32 2147483649, i32 2)
+ store ptr %big_malloc, ptr %p, align 8
; CHECK: @llvm.objectsize
- %1 = call i32 @llvm.objectsize.i32.p0i8(i8* %big_malloc, i1 false)
- store i32 %1, i32* %r, align 4
+ %1 = call i32 @llvm.objectsize.i32.p0(ptr %big_malloc, i1 false)
+ store i32 %1, ptr %r, align 4
- %big_little_malloc = call i8* @my_tiny_calloc(i8* null, i8* null, i8 127, i8 4)
- store i8* %big_little_malloc, i8** %p, align 8
+ %big_little_malloc = call ptr @my_tiny_calloc(ptr null, ptr null, i8 127, i8 4)
+ store ptr %big_little_malloc, ptr %p, align 8
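; 127 * 4 = 508 still fits in i32, so this one folds.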
; CHECK: store i32 508
- %2 = call i32 @llvm.objectsize.i32.p0i8(i8* %big_little_malloc, i1 false)
- store i32 %2, i32* %r, align 4
+ %2 = call i32 @llvm.objectsize.i32.p0(ptr %big_little_malloc, i1 false)
+ store i32 %2, ptr %r, align 4
; malloc(2**33)
- %big_malloc_i64 = call i8* @my_malloc_i64(i8* null, i64 8589934592)
- store i8* %big_malloc_i64, i8** %p, align 8
+ %big_malloc_i64 = call ptr @my_malloc_i64(ptr null, i64 8589934592)
+ store ptr %big_malloc_i64, ptr %p, align 8
; CHECK: @llvm.objectsize
- %3 = call i32 @llvm.objectsize.i32.p0i8(i8* %big_malloc_i64, i1 false)
- store i32 %3, i32* %r, align 4
+ %3 = call i32 @llvm.objectsize.i32.p0(ptr %big_malloc_i64, i1 false)
+ store i32 %3, ptr %r, align 4
- %4 = call i64 @llvm.objectsize.i64.p0i8(i8* %big_malloc_i64, i1 false)
+ %4 = call i64 @llvm.objectsize.i64.p0(ptr %big_malloc_i64, i1 false)
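; The same 2**33 allocation does fit in 64 bits, so the i64 query folds.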
; CHECK: store i64 8589934592
- store i64 %4, i64* %r64, align 8
+ store i64 %4, ptr %r, align 8
; Just intended to ensure that we properly handle args of different types...
- %varied_calloc = call i8* @my_varied_calloc(i8* null, i8* null, i32 1000, i8 5)
- store i8* %varied_calloc, i8** %p, align 8
+ %varied_calloc = call ptr @my_varied_calloc(ptr null, ptr null, i32 1000, i8 5)
+ store ptr %varied_calloc, ptr %p, align 8
; CHECK: store i32 5000
- %5 = call i32 @llvm.objectsize.i32.p0i8(i8* %varied_calloc, i1 false)
- store i32 %5, i32* %r, align 4
+ %5 = call i32 @llvm.objectsize.i32.p0(ptr %varied_calloc, i1 false)
+ store i32 %5, ptr %r, align 4
ret void
}
; CHECK-LABEL: define void @test_nobuiltin
; We had a bug where `nobuiltin` would cause `allocsize` to be ignored in
; @llvm.objectsize calculations.
-define void @test_nobuiltin(i8** %p, i64* %r) {
- %1 = call i8* @my_malloc(i8* null, i32 100) nobuiltin
- store i8* %1, i8** %p, align 8
+define void @test_nobuiltin(ptr %p, ptr %r) {
+ %1 = call ptr @my_malloc(ptr null, i32 100) nobuiltin
+ store ptr %1, ptr %p, align 8
- %2 = call i64 @llvm.objectsize.i64.p0i8(i8* %1, i1 false)
+ %2 = call i64 @llvm.objectsize.i64.p0(ptr %1, i1 false)
; CHECK: store i64 100
- store i64 %2, i64* %r, align 8
+ store i64 %2, ptr %r, align 8
ret void
}
attributes #0 = { allocsize(1) }
attributes #1 = { allocsize(2, 3) }
-declare i32 @llvm.objectsize.i32.p0i8(i8*, i1)
-declare i64 @llvm.objectsize.i64.p0i8(i8*, i1)
+declare i32 @llvm.objectsize.i32.p0(ptr, i1)
+declare i64 @llvm.objectsize.i64.p0(ptr, i1)
;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
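; null compares as the minimum pointer value, so x >u y already implies
; x != null; and-ing that with x == null can only yield false.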
-define i1 @ugt_and_min(i8* %x, i8* %y) {
+define i1 @ugt_and_min(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_and_min(
; CHECK-NEXT: ret i1 false
;
- %cmp = icmp ugt i8* %x, %y
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp ugt ptr %x, %y
+ %cmpeq = icmp eq ptr %x, null
%r = and i1 %cmp, %cmpeq
ret i1 %r
}
-define i1 @ugt_and_min_logical(i8* %x, i8* %y) {
+define i1 @ugt_and_min_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_and_min_logical(
; CHECK-NEXT: ret i1 false
;
- %cmp = icmp ugt i8* %x, %y
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp ugt ptr %x, %y
+ %cmpeq = icmp eq ptr %x, null
%r = select i1 %cmp, i1 %cmpeq, i1 false
ret i1 %r
}
-define i1 @ugt_and_min_commute(<2 x i8>* %x, <2 x i8>* %y) {
+define i1 @ugt_and_min_commute(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_and_min_commute(
; CHECK-NEXT: ret i1 false
;
- %cmp = icmp ugt <2 x i8>* %x, %y
- %cmpeq = icmp eq <2 x i8>* %x, null
+ %cmp = icmp ugt ptr %x, %y
+ %cmpeq = icmp eq ptr %x, null
%r = and i1 %cmpeq, %cmp
ret i1 %r
}
-define i1 @ugt_and_min_commute_logical(<2 x i8>* %x, <2 x i8>* %y) {
+define i1 @ugt_and_min_commute_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_and_min_commute_logical(
; CHECK-NEXT: ret i1 false
;
- %cmp = icmp ugt <2 x i8>* %x, %y
- %cmpeq = icmp eq <2 x i8>* %x, null
+ %cmp = icmp ugt ptr %x, %y
+ %cmpeq = icmp eq ptr %x, null
%r = select i1 %cmpeq, i1 %cmp, i1 false
ret i1 %r
}
-define i1 @ugt_swap_and_min(i8* %x, i8* %y) {
+define i1 @ugt_swap_and_min(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_swap_and_min(
; CHECK-NEXT: ret i1 false
;
- %cmp = icmp ult i8* %y, %x
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp ult ptr %y, %x
+ %cmpeq = icmp eq ptr %x, null
%r = and i1 %cmp, %cmpeq
ret i1 %r
}
-define i1 @ugt_swap_and_min_logical(i8* %x, i8* %y) {
+define i1 @ugt_swap_and_min_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_swap_and_min_logical(
; CHECK-NEXT: ret i1 false
;
- %cmp = icmp ult i8* %y, %x
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp ult ptr %y, %x
+ %cmpeq = icmp eq ptr %x, null
%r = select i1 %cmp, i1 %cmpeq, i1 false
ret i1 %r
}
-define i1 @ugt_swap_and_min_commute(i8* %x, i8* %y) {
+define i1 @ugt_swap_and_min_commute(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_swap_and_min_commute(
; CHECK-NEXT: ret i1 false
;
- %cmp = icmp ult i8* %y, %x
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp ult ptr %y, %x
+ %cmpeq = icmp eq ptr %x, null
%r = and i1 %cmpeq, %cmp
ret i1 %r
}
-define i1 @ugt_swap_and_min_commute_logical(i8* %x, i8* %y) {
+define i1 @ugt_swap_and_min_commute_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_swap_and_min_commute_logical(
; CHECK-NEXT: ret i1 false
;
- %cmp = icmp ult i8* %y, %x
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp ult ptr %y, %x
+ %cmpeq = icmp eq ptr %x, null
%r = select i1 %cmpeq, i1 %cmp, i1 false
ret i1 %r
}
;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
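; If x == null then x <=u y holds for any y, so at least one operand of the
; `or` is always true and the whole expression folds to true.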
-define i1 @ule_or_not_min(i427* %x, i427* %y) {
+define i1 @ule_or_not_min(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_or_not_min(
; CHECK-NEXT: ret i1 true
;
- %cmp = icmp ule i427* %x, %y
- %cmpeq = icmp ne i427* %x, null
+ %cmp = icmp ule ptr %x, %y
+ %cmpeq = icmp ne ptr %x, null
%r = or i1 %cmp, %cmpeq
ret i1 %r
}
-define i1 @ule_or_not_min_logical(i427* %x, i427* %y) {
+define i1 @ule_or_not_min_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_or_not_min_logical(
; CHECK-NEXT: ret i1 true
;
- %cmp = icmp ule i427* %x, %y
- %cmpeq = icmp ne i427* %x, null
+ %cmp = icmp ule ptr %x, %y
+ %cmpeq = icmp ne ptr %x, null
%r = select i1 %cmp, i1 true, i1 %cmpeq
ret i1 %r
}
-define i1 @ule_or_not_min_commute(<3 x i9>* %x, <3 x i9>* %y) {
+define i1 @ule_or_not_min_commute(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_or_not_min_commute(
; CHECK-NEXT: ret i1 true
;
- %cmp = icmp ule <3 x i9>* %x, %y
- %cmpeq = icmp ne <3 x i9>* %x, null
+ %cmp = icmp ule ptr %x, %y
+ %cmpeq = icmp ne ptr %x, null
%r = or i1 %cmpeq, %cmp
ret i1 %r
}
-define i1 @ule_or_not_min_commute_logical(<3 x i9>* %x, <3 x i9>* %y) {
+define i1 @ule_or_not_min_commute_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_or_not_min_commute_logical(
; CHECK-NEXT: ret i1 true
;
- %cmp = icmp ule <3 x i9>* %x, %y
- %cmpeq = icmp ne <3 x i9>* %x, null
+ %cmp = icmp ule ptr %x, %y
+ %cmpeq = icmp ne ptr %x, null
%r = select i1 %cmpeq, i1 true, i1 %cmp
ret i1 %r
}
-define i1 @ule_swap_or_not_min(i8* %x, i8* %y) {
+define i1 @ule_swap_or_not_min(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_swap_or_not_min(
; CHECK-NEXT: ret i1 true
;
- %cmp = icmp uge i8* %y, %x
- %cmpeq = icmp ne i8* %x, null
+ %cmp = icmp uge ptr %y, %x
+ %cmpeq = icmp ne ptr %x, null
%r = or i1 %cmp, %cmpeq
ret i1 %r
}
-define i1 @ule_swap_or_not_min_logical(i8* %x, i8* %y) {
+define i1 @ule_swap_or_not_min_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_swap_or_not_min_logical(
; CHECK-NEXT: ret i1 true
;
- %cmp = icmp uge i8* %y, %x
- %cmpeq = icmp ne i8* %x, null
+ %cmp = icmp uge ptr %y, %x
+ %cmpeq = icmp ne ptr %x, null
%r = select i1 %cmp, i1 true, i1 %cmpeq
ret i1 %r
}
-define i1 @ule_swap_or_not_min_commute(i8* %x, i8* %y) {
+define i1 @ule_swap_or_not_min_commute(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_swap_or_not_min_commute(
; CHECK-NEXT: ret i1 true
;
- %cmp = icmp uge i8* %y, %x
- %cmpeq = icmp ne i8* %x, null
+ %cmp = icmp uge ptr %y, %x
+ %cmpeq = icmp ne ptr %x, null
%r = or i1 %cmpeq, %cmp
ret i1 %r
}
-define i1 @ule_swap_or_not_min_commute_logical(i8* %x, i8* %y) {
+define i1 @ule_swap_or_not_min_commute_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_swap_or_not_min_commute_logical(
; CHECK-NEXT: ret i1 true
;
- %cmp = icmp uge i8* %y, %x
- %cmpeq = icmp ne i8* %x, null
+ %cmp = icmp uge ptr %y, %x
+ %cmpeq = icmp ne ptr %x, null
%r = select i1 %cmpeq, i1 true, i1 %cmp
ret i1 %r
}
;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
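; Here x == null implies x <=u y, so the `and` carries no extra information
; beyond the null check and reduces to it.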
-define i1 @ule_and_min(i8* %x, i8* %y) {
+define i1 @ule_and_min(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_and_min(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq i8* [[X:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq ptr [[X:%.*]], null
; CHECK-NEXT: ret i1 [[CMPEQ]]
;
- %cmp = icmp ule i8* %x, %y
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp ule ptr %x, %y
+ %cmpeq = icmp eq ptr %x, null
%r = and i1 %cmp, %cmpeq
ret i1 %r
}
-define i1 @ule_and_min_logical(i8* %x, i8* %y) {
+define i1 @ule_and_min_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_and_min_logical(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq i8* [[X:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq ptr [[X:%.*]], null
; CHECK-NEXT: ret i1 [[CMPEQ]]
;
- %cmp = icmp ule i8* %x, %y
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp ule ptr %x, %y
+ %cmpeq = icmp eq ptr %x, null
%r = select i1 %cmp, i1 %cmpeq, i1 false
ret i1 %r
}
-define i1 @ule_and_min_commute(i8* %x, i8* %y) {
+define i1 @ule_and_min_commute(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_and_min_commute(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq i8* [[X:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq ptr [[X:%.*]], null
; CHECK-NEXT: ret i1 [[CMPEQ]]
;
- %cmp = icmp ule i8* %x, %y
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp ule ptr %x, %y
+ %cmpeq = icmp eq ptr %x, null
%r = and i1 %cmpeq, %cmp
ret i1 %r
}
-define i1 @ule_and_min_commute_logical(i8* %x, i8* %y) {
+define i1 @ule_and_min_commute_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_and_min_commute_logical(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq i8* [[X:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq ptr [[X:%.*]], null
; CHECK-NEXT: ret i1 [[CMPEQ]]
;
- %cmp = icmp ule i8* %x, %y
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp ule ptr %x, %y
+ %cmpeq = icmp eq ptr %x, null
%r = select i1 %cmpeq, i1 %cmp, i1 false
ret i1 %r
}
-define i1 @ule_swap_and_min(i8* %x, i8* %y) {
+define i1 @ule_swap_and_min(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_swap_and_min(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq i8* [[X:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq ptr [[X:%.*]], null
; CHECK-NEXT: ret i1 [[CMPEQ]]
;
- %cmp = icmp uge i8* %y, %x
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp uge ptr %y, %x
+ %cmpeq = icmp eq ptr %x, null
%r = and i1 %cmp, %cmpeq
ret i1 %r
}
-define i1 @ule_swap_and_min_logical(i8* %x, i8* %y) {
+define i1 @ule_swap_and_min_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_swap_and_min_logical(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq i8* [[X:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq ptr [[X:%.*]], null
; CHECK-NEXT: ret i1 [[CMPEQ]]
;
- %cmp = icmp uge i8* %y, %x
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp uge ptr %y, %x
+ %cmpeq = icmp eq ptr %x, null
%r = select i1 %cmp, i1 %cmpeq, i1 false
ret i1 %r
}
-define i1 @ule_swap_and_min_commute(i8* %x, i8* %y) {
+define i1 @ule_swap_and_min_commute(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_swap_and_min_commute(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq i8* [[X:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq ptr [[X:%.*]], null
; CHECK-NEXT: ret i1 [[CMPEQ]]
;
- %cmp = icmp uge i8* %y, %x
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp uge ptr %y, %x
+ %cmpeq = icmp eq ptr %x, null
%r = and i1 %cmpeq, %cmp
ret i1 %r
}
-define i1 @ule_swap_and_min_commute_logical(i8* %x, i8* %y) {
+define i1 @ule_swap_and_min_commute_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_swap_and_min_commute_logical(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq i8* [[X:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq ptr [[X:%.*]], null
; CHECK-NEXT: ret i1 [[CMPEQ]]
;
- %cmp = icmp uge i8* %y, %x
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp uge ptr %y, %x
+ %cmpeq = icmp eq ptr %x, null
%r = select i1 %cmpeq, i1 %cmp, i1 false
ret i1 %r
}
;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-define i1 @ule_or_min(i8* %x, i8* %y) {
+define i1 @ule_or_min(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_or_min(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ule i8* [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ule ptr [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %cmp = icmp ule i8* %x, %y
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp ule ptr %x, %y
+ %cmpeq = icmp eq ptr %x, null
%r = or i1 %cmp, %cmpeq
ret i1 %r
}
-define i1 @ule_or_min_logical(i8* %x, i8* %y) {
+define i1 @ule_or_min_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_or_min_logical(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ule i8* [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ule ptr [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %cmp = icmp ule i8* %x, %y
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp ule ptr %x, %y
+ %cmpeq = icmp eq ptr %x, null
%r = select i1 %cmp, i1 true, i1 %cmpeq
ret i1 %r
}
-define i1 @ule_or_min_commute(i8* %x, i8* %y) {
+define i1 @ule_or_min_commute(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_or_min_commute(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ule i8* [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ule ptr [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %cmp = icmp ule i8* %x, %y
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp ule ptr %x, %y
+ %cmpeq = icmp eq ptr %x, null
%r = or i1 %cmpeq, %cmp
ret i1 %r
}
-define i1 @ule_or_min_commute_logical(i8* %x, i8* %y) {
+define i1 @ule_or_min_commute_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_or_min_commute_logical(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ule i8* [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq i8* [[X]], null
+; CHECK-NEXT: [[CMP:%.*]] = icmp ule ptr [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq ptr [[X]], null
; CHECK-NEXT: [[R:%.*]] = select i1 [[CMPEQ]], i1 true, i1 [[CMP]]
; CHECK-NEXT: ret i1 [[R]]
;
- %cmp = icmp ule i8* %x, %y
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp ule ptr %x, %y
+ %cmpeq = icmp eq ptr %x, null
%r = select i1 %cmpeq, i1 true, i1 %cmp
ret i1 %r
}
-define i1 @ule_swap_or_min(i8* %x, i8* %y) {
+define i1 @ule_swap_or_min(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_swap_or_min(
-; CHECK-NEXT: [[CMP:%.*]] = icmp uge i8* [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp uge ptr [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %cmp = icmp uge i8* %y, %x
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp uge ptr %y, %x
+ %cmpeq = icmp eq ptr %x, null
%r = or i1 %cmp, %cmpeq
ret i1 %r
}
-define i1 @ule_swap_or_min_logical(i8* %x, i8* %y) {
+define i1 @ule_swap_or_min_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_swap_or_min_logical(
-; CHECK-NEXT: [[CMP:%.*]] = icmp uge i8* [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp uge ptr [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %cmp = icmp uge i8* %y, %x
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp uge ptr %y, %x
+ %cmpeq = icmp eq ptr %x, null
%r = select i1 %cmp, i1 true, i1 %cmpeq
ret i1 %r
}
-define i1 @ule_swap_or_min_commute(i8* %x, i8* %y) {
+define i1 @ule_swap_or_min_commute(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_swap_or_min_commute(
-; CHECK-NEXT: [[CMP:%.*]] = icmp uge i8* [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp uge ptr [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %cmp = icmp uge i8* %y, %x
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp uge ptr %y, %x
+ %cmpeq = icmp eq ptr %x, null
%r = or i1 %cmpeq, %cmp
ret i1 %r
}
-define i1 @ule_swap_or_min_commute_logical(i8* %x, i8* %y) {
+define i1 @ule_swap_or_min_commute_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ule_swap_or_min_commute_logical(
-; CHECK-NEXT: [[CMP:%.*]] = icmp uge i8* [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq i8* [[X]], null
+; CHECK-NEXT: [[CMP:%.*]] = icmp uge ptr [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq ptr [[X]], null
; CHECK-NEXT: [[R:%.*]] = select i1 [[CMPEQ]], i1 true, i1 [[CMP]]
; CHECK-NEXT: ret i1 [[R]]
;
- %cmp = icmp uge i8* %y, %x
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp uge ptr %y, %x
+ %cmpeq = icmp eq ptr %x, null
%r = select i1 %cmpeq, i1 true, i1 %cmp
ret i1 %r
}
;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-define i1 @ugt_and_not_min(i8* %x, i8* %y) {
+define i1 @ugt_and_not_min(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_and_not_min(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8* [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt ptr [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %cmp = icmp ugt i8* %x, %y
- %cmpeq = icmp ne i8* %x, null
+ %cmp = icmp ugt ptr %x, %y
+ %cmpeq = icmp ne ptr %x, null
%r = and i1 %cmp, %cmpeq
ret i1 %r
}
-define i1 @ugt_and_not_min_logical(i8* %x, i8* %y) {
+define i1 @ugt_and_not_min_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_and_not_min_logical(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8* [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt ptr [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %cmp = icmp ugt i8* %x, %y
- %cmpeq = icmp ne i8* %x, null
+ %cmp = icmp ugt ptr %x, %y
+ %cmpeq = icmp ne ptr %x, null
%r = select i1 %cmp, i1 %cmpeq, i1 false
ret i1 %r
}
-define i1 @ugt_and_not_min_commute(i8* %x, i8* %y) {
+define i1 @ugt_and_not_min_commute(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_and_not_min_commute(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8* [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt ptr [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %cmp = icmp ugt i8* %x, %y
- %cmpeq = icmp ne i8* %x, null
+ %cmp = icmp ugt ptr %x, %y
+ %cmpeq = icmp ne ptr %x, null
%r = and i1 %cmpeq, %cmp
ret i1 %r
}
-define i1 @ugt_and_not_min_commute_logical(i8* %x, i8* %y) {
+define i1 @ugt_and_not_min_commute_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_and_not_min_commute_logical(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8* [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne i8* [[X]], null
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt ptr [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne ptr [[X]], null
; CHECK-NEXT: [[R:%.*]] = select i1 [[CMPEQ]], i1 [[CMP]], i1 false
; CHECK-NEXT: ret i1 [[R]]
;
- %cmp = icmp ugt i8* %x, %y
- %cmpeq = icmp ne i8* %x, null
+ %cmp = icmp ugt ptr %x, %y
+ %cmpeq = icmp ne ptr %x, null
%r = select i1 %cmpeq, i1 %cmp, i1 false
ret i1 %r
}
-define i1 @ugt_swap_and_not_min(i8* %x, i8* %y) {
+define i1 @ugt_swap_and_not_min(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_swap_and_not_min(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8* [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult ptr [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %cmp = icmp ult i8* %y, %x
- %cmpeq = icmp ne i8* %x, null
+ %cmp = icmp ult ptr %y, %x
+ %cmpeq = icmp ne ptr %x, null
%r = and i1 %cmp, %cmpeq
ret i1 %r
}
-define i1 @ugt_swap_and_not_min_logical(i8* %x, i8* %y) {
+define i1 @ugt_swap_and_not_min_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_swap_and_not_min_logical(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8* [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult ptr [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %cmp = icmp ult i8* %y, %x
- %cmpeq = icmp ne i8* %x, null
+ %cmp = icmp ult ptr %y, %x
+ %cmpeq = icmp ne ptr %x, null
%r = select i1 %cmp, i1 %cmpeq, i1 false
ret i1 %r
}
-define i1 @ugt_swap_and_not_min_commute(i8* %x, i8* %y) {
+define i1 @ugt_swap_and_not_min_commute(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_swap_and_not_min_commute(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8* [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult ptr [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %cmp = icmp ult i8* %y, %x
- %cmpeq = icmp ne i8* %x, null
+ %cmp = icmp ult ptr %y, %x
+ %cmpeq = icmp ne ptr %x, null
%r = and i1 %cmpeq, %cmp
ret i1 %r
}
-define i1 @ugt_swap_and_not_min_commute_logical(i8* %x, i8* %y) {
+define i1 @ugt_swap_and_not_min_commute_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_swap_and_not_min_commute_logical(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8* [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne i8* [[X]], null
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult ptr [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne ptr [[X]], null
; CHECK-NEXT: [[R:%.*]] = select i1 [[CMPEQ]], i1 [[CMP]], i1 false
; CHECK-NEXT: ret i1 [[R]]
;
- %cmp = icmp ult i8* %y, %x
- %cmpeq = icmp ne i8* %x, null
+ %cmp = icmp ult ptr %y, %x
+ %cmpeq = icmp ne ptr %x, null
%r = select i1 %cmpeq, i1 %cmp, i1 false
ret i1 %r
}
;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-define i1 @ugt_or_not_min(i8* %x, i8* %y) {
+define i1 @ugt_or_not_min(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_or_not_min(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne i8* [[X:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne ptr [[X:%.*]], null
; CHECK-NEXT: ret i1 [[CMPEQ]]
;
- %cmp = icmp ugt i8* %x, %y
- %cmpeq = icmp ne i8* %x, null
+ %cmp = icmp ugt ptr %x, %y
+ %cmpeq = icmp ne ptr %x, null
%r = or i1 %cmp, %cmpeq
ret i1 %r
}
-define i1 @ugt_or_not_min_logical(i8* %x, i8* %y) {
+define i1 @ugt_or_not_min_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_or_not_min_logical(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne i8* [[X:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne ptr [[X:%.*]], null
; CHECK-NEXT: ret i1 [[CMPEQ]]
;
- %cmp = icmp ugt i8* %x, %y
- %cmpeq = icmp ne i8* %x, null
+ %cmp = icmp ugt ptr %x, %y
+ %cmpeq = icmp ne ptr %x, null
%r = select i1 %cmp, i1 true, i1 %cmpeq
ret i1 %r
}
-define i1 @ugt_or_not_min_commute(i8* %x, i8* %y) {
+define i1 @ugt_or_not_min_commute(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_or_not_min_commute(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne i8* [[X:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne ptr [[X:%.*]], null
; CHECK-NEXT: ret i1 [[CMPEQ]]
;
- %cmp = icmp ugt i8* %x, %y
- %cmpeq = icmp ne i8* %x, null
+ %cmp = icmp ugt ptr %x, %y
+ %cmpeq = icmp ne ptr %x, null
%r = or i1 %cmpeq, %cmp
ret i1 %r
}
-define i1 @ugt_or_not_min_commute_logical(i8* %x, i8* %y) {
+define i1 @ugt_or_not_min_commute_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_or_not_min_commute_logical(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne i8* [[X:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne ptr [[X:%.*]], null
; CHECK-NEXT: ret i1 [[CMPEQ]]
;
- %cmp = icmp ugt i8* %x, %y
- %cmpeq = icmp ne i8* %x, null
+ %cmp = icmp ugt ptr %x, %y
+ %cmpeq = icmp ne ptr %x, null
%r = select i1 %cmpeq, i1 true, i1 %cmp
ret i1 %r
}
-define i1 @ugt_swap_or_not_min(i8* %x, i8* %y) {
+define i1 @ugt_swap_or_not_min(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_swap_or_not_min(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne i8* [[X:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne ptr [[X:%.*]], null
; CHECK-NEXT: ret i1 [[CMPEQ]]
;
- %cmp = icmp ult i8* %y, %x
- %cmpeq = icmp ne i8* %x, null
+ %cmp = icmp ult ptr %y, %x
+ %cmpeq = icmp ne ptr %x, null
%r = or i1 %cmp, %cmpeq
ret i1 %r
}
-define i1 @ugt_swap_or_not_min_logical(i8* %x, i8* %y) {
+define i1 @ugt_swap_or_not_min_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_swap_or_not_min_logical(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne i8* [[X:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne ptr [[X:%.*]], null
; CHECK-NEXT: ret i1 [[CMPEQ]]
;
- %cmp = icmp ult i8* %y, %x
- %cmpeq = icmp ne i8* %x, null
+ %cmp = icmp ult ptr %y, %x
+ %cmpeq = icmp ne ptr %x, null
%r = select i1 %cmp, i1 true, i1 %cmpeq
ret i1 %r
}
-define i1 @ugt_swap_or_not_min_commute(i823* %x, i823* %y) {
+define i1 @ugt_swap_or_not_min_commute(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_swap_or_not_min_commute(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne i823* [[X:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne ptr [[X:%.*]], null
; CHECK-NEXT: ret i1 [[CMPEQ]]
;
- %cmp = icmp ult i823* %y, %x
- %cmpeq = icmp ne i823* %x, null
+ %cmp = icmp ult ptr %y, %x
+ %cmpeq = icmp ne ptr %x, null
%r = or i1 %cmpeq, %cmp
ret i1 %r
}
-define i1 @ugt_swap_or_not_min_commute_logical(i823* %x, i823* %y) {
+define i1 @ugt_swap_or_not_min_commute_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @ugt_swap_or_not_min_commute_logical(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne i823* [[X:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne ptr [[X:%.*]], null
; CHECK-NEXT: ret i1 [[CMPEQ]]
;
- %cmp = icmp ult i823* %y, %x
- %cmpeq = icmp ne i823* %x, null
+ %cmp = icmp ult ptr %y, %x
+ %cmpeq = icmp ne ptr %x, null
%r = select i1 %cmpeq, i1 true, i1 %cmp
ret i1 %r
}
-define i1 @sgt_and_min(i9* %x, i9* %y) {
+define i1 @sgt_and_min(ptr %x, ptr %y) {
; CHECK-LABEL: @sgt_and_min(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq i9* [[X:%.*]], null
-; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i9* [[Y:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq ptr [[X:%.*]], null
+; CHECK-NEXT: [[TMP1:%.*]] = icmp slt ptr [[Y:%.*]], null
; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[CMPEQ]], [[TMP1]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
- %cmp = icmp sgt i9* %x, %y
- %cmpeq = icmp eq i9* %x, null
+ %cmp = icmp sgt ptr %x, %y
+ %cmpeq = icmp eq ptr %x, null
%r = and i1 %cmp, %cmpeq
ret i1 %r
}
-define i1 @sgt_and_min_logical(i9* %x, i9* %y) {
+define i1 @sgt_and_min_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @sgt_and_min_logical(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq i9* [[X:%.*]], null
-; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i9* [[Y:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq ptr [[X:%.*]], null
+; CHECK-NEXT: [[TMP1:%.*]] = icmp slt ptr [[Y:%.*]], null
; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[CMPEQ]], [[TMP1]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
- %cmp = icmp sgt i9* %x, %y
- %cmpeq = icmp eq i9* %x, null
+ %cmp = icmp sgt ptr %x, %y
+ %cmpeq = icmp eq ptr %x, null
%r = select i1 %cmp, i1 %cmpeq, i1 false
ret i1 %r
}
-define i1 @sle_or_not_min(i427* %x, i427* %y) {
+define i1 @sle_or_not_min(ptr %x, ptr %y) {
; CHECK-LABEL: @sle_or_not_min(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne i427* [[X:%.*]], null
-; CHECK-NEXT: [[TMP1:%.*]] = icmp sge i427* [[Y:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne ptr [[X:%.*]], null
+; CHECK-NEXT: [[TMP1:%.*]] = icmp sge ptr [[Y:%.*]], null
; CHECK-NEXT: [[TMP2:%.*]] = or i1 [[CMPEQ]], [[TMP1]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
- %cmp = icmp sle i427* %x, %y
- %cmpeq = icmp ne i427* %x, null
+ %cmp = icmp sle ptr %x, %y
+ %cmpeq = icmp ne ptr %x, null
%r = or i1 %cmp, %cmpeq
ret i1 %r
}
-define i1 @sle_or_not_min_logical(i427* %x, i427* %y) {
+define i1 @sle_or_not_min_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @sle_or_not_min_logical(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne i427* [[X:%.*]], null
-; CHECK-NEXT: [[TMP1:%.*]] = icmp sge i427* [[Y:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne ptr [[X:%.*]], null
+; CHECK-NEXT: [[TMP1:%.*]] = icmp sge ptr [[Y:%.*]], null
; CHECK-NEXT: [[TMP2:%.*]] = or i1 [[CMPEQ]], [[TMP1]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
- %cmp = icmp sle i427* %x, %y
- %cmpeq = icmp ne i427* %x, null
+ %cmp = icmp sle ptr %x, %y
+ %cmpeq = icmp ne ptr %x, null
%r = select i1 %cmp, i1 true, i1 %cmpeq
ret i1 %r
}
-define i1 @sle_and_min(i8* %x, i8* %y) {
+define i1 @sle_and_min(ptr %x, ptr %y) {
; CHECK-LABEL: @sle_and_min(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq i8* [[X:%.*]], null
-; CHECK-NEXT: [[TMP1:%.*]] = icmp sge i8* [[Y:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq ptr [[X:%.*]], null
+; CHECK-NEXT: [[TMP1:%.*]] = icmp sge ptr [[Y:%.*]], null
; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[CMPEQ]], [[TMP1]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
- %cmp = icmp sle i8* %x, %y
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp sle ptr %x, %y
+ %cmpeq = icmp eq ptr %x, null
%r = and i1 %cmp, %cmpeq
ret i1 %r
}
-define i1 @sle_and_min_logical(i8* %x, i8* %y) {
+define i1 @sle_and_min_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @sle_and_min_logical(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq i8* [[X:%.*]], null
-; CHECK-NEXT: [[TMP1:%.*]] = icmp sge i8* [[Y:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq ptr [[X:%.*]], null
+; CHECK-NEXT: [[TMP1:%.*]] = icmp sge ptr [[Y:%.*]], null
; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[CMPEQ]], [[TMP1]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
- %cmp = icmp sle i8* %x, %y
- %cmpeq = icmp eq i8* %x, null
+ %cmp = icmp sle ptr %x, %y
+ %cmpeq = icmp eq ptr %x, null
%r = select i1 %cmp, i1 %cmpeq, i1 false
ret i1 %r
}
-define i1 @sgt_and_not_min(i8* %x, i8* %y) {
+define i1 @sgt_and_not_min(ptr %x, ptr %y) {
; CHECK-LABEL: @sgt_and_not_min(
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8* [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne i8* [[X]], null
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt ptr [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne ptr [[X]], null
; CHECK-NEXT: [[R:%.*]] = and i1 [[CMP]], [[CMPEQ]]
; CHECK-NEXT: ret i1 [[R]]
;
- %cmp = icmp sgt i8* %x, %y
- %cmpeq = icmp ne i8* %x, null
+ %cmp = icmp sgt ptr %x, %y
+ %cmpeq = icmp ne ptr %x, null
%r = and i1 %cmp, %cmpeq
ret i1 %r
}
-define i1 @sgt_and_not_min_logical(i8* %x, i8* %y) {
+define i1 @sgt_and_not_min_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @sgt_and_not_min_logical(
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8* [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne i8* [[X]], null
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt ptr [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne ptr [[X]], null
; CHECK-NEXT: [[R:%.*]] = and i1 [[CMP]], [[CMPEQ]]
; CHECK-NEXT: ret i1 [[R]]
;
- %cmp = icmp sgt i8* %x, %y
- %cmpeq = icmp ne i8* %x, null
+ %cmp = icmp sgt ptr %x, %y
+ %cmpeq = icmp ne ptr %x, null
%r = select i1 %cmp, i1 %cmpeq, i1 false
ret i1 %r
}
-define i1 @sgt_or_not_min(i8* %x, i8* %y) {
+define i1 @sgt_or_not_min(ptr %x, ptr %y) {
; CHECK-LABEL: @sgt_or_not_min(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne i8* [[X:%.*]], null
-; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i8* [[Y:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne ptr [[X:%.*]], null
+; CHECK-NEXT: [[TMP1:%.*]] = icmp slt ptr [[Y:%.*]], null
; CHECK-NEXT: [[TMP2:%.*]] = or i1 [[CMPEQ]], [[TMP1]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
- %cmp = icmp sgt i8* %x, %y
- %cmpeq = icmp ne i8* %x, null
+ %cmp = icmp sgt ptr %x, %y
+ %cmpeq = icmp ne ptr %x, null
%r = or i1 %cmp, %cmpeq
ret i1 %r
}
-define i1 @sgt_or_not_min_logical(i8* %x, i8* %y) {
+define i1 @sgt_or_not_min_logical(ptr %x, ptr %y) {
; CHECK-LABEL: @sgt_or_not_min_logical(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne i8* [[X:%.*]], null
-; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i8* [[Y:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp ne ptr [[X:%.*]], null
+; CHECK-NEXT: [[TMP1:%.*]] = icmp slt ptr [[Y:%.*]], null
; CHECK-NEXT: [[TMP2:%.*]] = or i1 [[CMPEQ]], [[TMP1]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
- %cmp = icmp sgt i8* %x, %y
- %cmpeq = icmp ne i8* %x, null
+ %cmp = icmp sgt ptr %x, %y
+ %cmpeq = icmp ne ptr %x, null
%r = select i1 %cmp, i1 true, i1 %cmpeq
ret i1 %r
}
-define i1 @slt_and_min(i8* %a, i8* %b) {
+define i1 @slt_and_min(ptr %a, ptr %b) {
; CHECK-LABEL: @slt_and_min(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq i8* [[A:%.*]], null
-; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i8* [[B:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq ptr [[A:%.*]], null
+; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt ptr [[B:%.*]], null
; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[CMPEQ]], [[TMP1]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
- %cmpeq = icmp eq i8* %a, null
- %cmp = icmp slt i8* %a, %b
+ %cmpeq = icmp eq ptr %a, null
+ %cmp = icmp slt ptr %a, %b
%r = and i1 %cmpeq, %cmp
ret i1 %r
}
-define i1 @slt_and_min_logical(i8* %a, i8* %b) {
+define i1 @slt_and_min_logical(ptr %a, ptr %b) {
; CHECK-LABEL: @slt_and_min_logical(
-; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq i8* [[A:%.*]], null
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8* [[B:%.*]], null
+; CHECK-NEXT: [[CMPEQ:%.*]] = icmp eq ptr [[A:%.*]], null
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt ptr [[B:%.*]], null
; CHECK-NEXT: [[R:%.*]] = select i1 [[CMPEQ]], i1 [[CMP]], i1 false
; CHECK-NEXT: ret i1 [[R]]
;
- %cmpeq = icmp eq i8* %a, null
- %cmp = icmp slt i8* %a, %b
+ %cmpeq = icmp eq ptr %a, null
+ %cmp = icmp slt ptr %a, %b
%r = select i1 %cmpeq, i1 %cmp, i1 false
ret i1 %r
}
define void @simplify_before_foldAndOfICmps() {
; CHECK-LABEL: @simplify_before_foldAndOfICmps(
; CHECK-NEXT: [[A8:%.*]] = alloca i16, align 2
-; CHECK-NEXT: [[L7:%.*]] = load i16, i16* [[A8]], align 2
+; CHECK-NEXT: [[L7:%.*]] = load i16, ptr [[A8]], align 2
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i16 [[L7]], -1
; CHECK-NEXT: [[B11:%.*]] = zext i1 [[TMP1]] to i16
; CHECK-NEXT: [[C10:%.*]] = icmp ugt i16 [[L7]], [[B11]]
; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[C10]], true
; CHECK-NEXT: [[C18:%.*]] = or i1 [[C7]], [[TMP3]]
; CHECK-NEXT: [[TMP4:%.*]] = sext i1 [[C3]] to i64
-; CHECK-NEXT: [[G26:%.*]] = getelementptr i1, i1* null, i64 [[TMP4]]
-; CHECK-NEXT: store i16 [[L7]], i16* undef, align 2
-; CHECK-NEXT: store i1 [[C18]], i1* undef, align 1
-; CHECK-NEXT: store i1* [[G26]], i1** undef, align 8
+; CHECK-NEXT: [[G26:%.*]] = getelementptr i1, ptr null, i64 [[TMP4]]
+; CHECK-NEXT: store i16 [[L7]], ptr undef, align 2
+; CHECK-NEXT: store i1 [[C18]], ptr undef, align 1
+; CHECK-NEXT: store ptr [[G26]], ptr undef, align 8
; CHECK-NEXT: ret void
;
%A8 = alloca i16
- %L7 = load i16, i16* %A8
- %G21 = getelementptr i16, i16* %A8, i8 -1
+ %L7 = load i16, ptr %A8
+ %G21 = getelementptr i16, ptr %A8, i8 -1
%B11 = udiv i16 %L7, -1
- %G4 = getelementptr i16, i16* %A8, i16 %B11
- %L2 = load i16, i16* %G4
- %L = load i16, i16* %G4
+ %G4 = getelementptr i16, ptr %A8, i16 %B11
+ %L2 = load i16, ptr %G4
+ %L = load i16, ptr %G4
%B23 = mul i16 %B11, %B11
- %L4 = load i16, i16* %A8
+ %L4 = load i16, ptr %A8
%B21 = sdiv i16 %L7, %L4
%B7 = sub i16 0, %B21
%B18 = mul i16 %B23, %B7
%B33 = or i16 %B29, %L4
%C13 = icmp uge i1 %C5, %B1
%C3 = icmp ult i1 %C13, %C6
- store i16 undef, i16* %G21
+ store i16 undef, ptr %G21
%C18 = icmp ule i1 %C10, %C7
- %G26 = getelementptr i1, i1* null, i1 %C3
- store i16 %B33, i16* undef
- store i1 %C18, i1* undef
- store i1* %G26, i1** undef
+ %G26 = getelementptr i1, ptr null, i1 %C3
+ store i16 %B33, ptr undef
+ store i1 %C18, ptr undef
+ store ptr %G26, ptr undef
ret void
}
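; Note on the CHECK lines above: `udiv i16 %L7, -1` can only produce 0 or 1,
; and it produces 1 exactly when %L7 is -1 (i.e. 65535), which is why the
; division is rewritten into the icmp eq / zext pair.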
; a & (a ^ ~b) --> a & b
-define i32 @and_xor_not_common_op_extrause(i32 %a, i32 %b, i32* %dst) {
+define i32 @and_xor_not_common_op_extrause(i32 %a, i32 %b, ptr %dst) {
; CHECK-LABEL: define {{[^@]+}}@and_xor_not_common_op_extrause
-; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32* [[DST:%.*]]) {
+; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], ptr [[DST:%.*]]) {
; CHECK-NEXT: [[B2:%.*]] = xor i32 [[B]], -1
-; CHECK-NEXT: store i32 [[B2]], i32* [[DST]], align 4
+; CHECK-NEXT: store i32 [[B2]], ptr [[DST]], align 4
; CHECK-NEXT: [[T4:%.*]] = and i32 [[A]], [[B]]
; CHECK-NEXT: ret i32 [[T4]]
;
%b2 = xor i32 %b, -1
- store i32 %b2, i32* %dst
+ store i32 %b2, ptr %dst
%t2 = xor i32 %a, %b2
%t4 = and i32 %t2, %a
ret i32 %t4
}
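; For reference, a minimal sketch (hypothetical function name) of the
; one-use case described by the comment above: with no extra use of the
; xor, the whole chain is expected to collapse to `and i32 %a, %b`.
define i32 @and_xor_not_common_op_sketch(i32 %a, i32 %b) {
  %nb = xor i32 %b, -1    ; ~b
  %t2 = xor i32 %a, %nb   ; a ^ ~b
  %t4 = and i32 %t2, %a   ; a & (a ^ ~b) --> a & b
  ret i32 %t4
}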
-define i32 @test11(i32 %A, i32* %P) {
+define i32 @test11(i32 %A, ptr %P) {
; CHECK-LABEL: @test11(
; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[A:%.*]], -4
; CHECK-NEXT: [[C:%.*]] = xor i32 [[TMP1]], 15
-; CHECK-NEXT: store i32 [[C]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[C]], ptr [[P:%.*]], align 4
; CHECK-NEXT: ret i32 3
;
%B = or i32 %A, 3
%C = xor i32 %B, 12
; additional use of C
- store i32 %C, i32* %P
+ store i32 %C, ptr %P
  ; %D = and i32 %C, 3 --> 3
%D = and i32 %C, 3
ret i32 %D
}
-define <2 x i8> @ashr_lowmask_use_splat(<2 x i8> %x, <2 x i8>* %p) {
+define <2 x i8> @ashr_lowmask_use_splat(<2 x i8> %x, ptr %p) {
; CHECK-LABEL: @ashr_lowmask_use_splat(
; CHECK-NEXT: [[A:%.*]] = ashr <2 x i8> [[X:%.*]], <i8 7, i8 7>
-; CHECK-NEXT: store <2 x i8> [[A]], <2 x i8>* [[P:%.*]], align 2
+; CHECK-NEXT: store <2 x i8> [[A]], ptr [[P:%.*]], align 2
; CHECK-NEXT: [[R:%.*]] = lshr <2 x i8> [[X]], <i8 7, i8 7>
; CHECK-NEXT: ret <2 x i8> [[R]]
;
%a = ashr <2 x i8> %x, <i8 7, i8 7>
- store <2 x i8> %a, <2 x i8>* %p
+ store <2 x i8> %a, ptr %p
%r = and <2 x i8> %a, <i8 1, i8 1>
ret <2 x i8> %r
}
-define <2 x i32> @lowmask_sext_in_reg_splat(<2 x i32> %x, <2 x i32>* %p) {
+define <2 x i32> @lowmask_sext_in_reg_splat(<2 x i32> %x, ptr %p) {
; CHECK-LABEL: @lowmask_sext_in_reg_splat(
; CHECK-NEXT: [[L:%.*]] = shl <2 x i32> [[X:%.*]], <i32 20, i32 20>
; CHECK-NEXT: [[R:%.*]] = ashr exact <2 x i32> [[L]], <i32 20, i32 20>
-; CHECK-NEXT: store <2 x i32> [[R]], <2 x i32>* [[P:%.*]], align 8
+; CHECK-NEXT: store <2 x i32> [[R]], ptr [[P:%.*]], align 8
; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X]], <i32 4095, i32 4095>
; CHECK-NEXT: ret <2 x i32> [[AND]]
;
%l = shl <2 x i32> %x, <i32 20, i32 20>
%r = ashr <2 x i32> %l, <i32 20, i32 20>
- store <2 x i32> %r, <2 x i32>* %p
+ store <2 x i32> %r, ptr %p
%and = and <2 x i32> %r, <i32 4095, i32 4095>
ret <2 x i32> %and
}
-define <2 x i8> @lowmask_add_2_splat(<2 x i8> %x, <2 x i8>* %p) {
+define <2 x i8> @lowmask_add_2_splat(<2 x i8> %x, ptr %p) {
; CHECK-LABEL: @lowmask_add_2_splat(
; CHECK-NEXT: [[A:%.*]] = add <2 x i8> [[X:%.*]], <i8 -64, i8 -64>
-; CHECK-NEXT: store <2 x i8> [[A]], <2 x i8>* [[P:%.*]], align 2
+; CHECK-NEXT: store <2 x i8> [[A]], ptr [[P:%.*]], align 2
; CHECK-NEXT: [[R:%.*]] = and <2 x i8> [[X]], <i8 63, i8 63>
; CHECK-NEXT: ret <2 x i8> [[R]]
;
%a = add <2 x i8> %x, <i8 -64, i8 -64> ; 0xc0
- store <2 x i8> %a, <2 x i8>* %p
+ store <2 x i8> %a, ptr %p
%r = and <2 x i8> %a, <i8 63, i8 63> ; 0x3f
ret <2 x i8> %r
}
-define <2 x i8> @lowmask_add_splat(<2 x i8> %x, <2 x i8>* %p) {
+define <2 x i8> @lowmask_add_splat(<2 x i8> %x, ptr %p) {
; CHECK-LABEL: @lowmask_add_splat(
; CHECK-NEXT: [[A:%.*]] = add <2 x i8> [[X:%.*]], <i8 -64, i8 -64>
-; CHECK-NEXT: store <2 x i8> [[A]], <2 x i8>* [[P:%.*]], align 2
+; CHECK-NEXT: store <2 x i8> [[A]], ptr [[P:%.*]], align 2
; CHECK-NEXT: [[R:%.*]] = and <2 x i8> [[X]], <i8 32, i8 32>
; CHECK-NEXT: ret <2 x i8> [[R]]
;
%a = add <2 x i8> %x, <i8 -64, i8 -64> ; 0xc0
- store <2 x i8> %a, <2 x i8>* %p
+ store <2 x i8> %a, ptr %p
%r = and <2 x i8> %a, <i8 32, i8 32> ; 0x20
ret <2 x i8> %r
}
-define <2 x i8> @lowmask_add_splat_undef(<2 x i8> %x, <2 x i8>* %p) {
+define <2 x i8> @lowmask_add_splat_undef(<2 x i8> %x, ptr %p) {
; CHECK-LABEL: @lowmask_add_splat_undef(
; CHECK-NEXT: [[A:%.*]] = add <2 x i8> [[X:%.*]], <i8 -64, i8 undef>
-; CHECK-NEXT: store <2 x i8> [[A]], <2 x i8>* [[P:%.*]], align 2
+; CHECK-NEXT: store <2 x i8> [[A]], ptr [[P:%.*]], align 2
; CHECK-NEXT: [[R:%.*]] = and <2 x i8> [[A]], <i8 undef, i8 32>
; CHECK-NEXT: ret <2 x i8> [[R]]
;
%a = add <2 x i8> %x, <i8 -64, i8 undef> ; 0xc0
- store <2 x i8> %a, <2 x i8>* %p
+ store <2 x i8> %a, ptr %p
%r = and <2 x i8> %a, <i8 undef, i8 32> ; 0x20
ret <2 x i8> %r
}
-define <2 x i8> @lowmask_add_vec(<2 x i8> %x, <2 x i8>* %p) {
+define <2 x i8> @lowmask_add_vec(<2 x i8> %x, ptr %p) {
; CHECK-LABEL: @lowmask_add_vec(
; CHECK-NEXT: [[A:%.*]] = add <2 x i8> [[X:%.*]], <i8 -96, i8 -64>
-; CHECK-NEXT: store <2 x i8> [[A]], <2 x i8>* [[P:%.*]], align 2
+; CHECK-NEXT: store <2 x i8> [[A]], ptr [[P:%.*]], align 2
; CHECK-NEXT: [[R:%.*]] = and <2 x i8> [[A]], <i8 16, i8 32>
; CHECK-NEXT: ret <2 x i8> [[R]]
;
%a = add <2 x i8> %x, <i8 -96, i8 -64> ; 0xe0, 0xc0
- store <2 x i8> %a, <2 x i8>* %p
+ store <2 x i8> %a, ptr %p
%r = and <2 x i8> %a, <i8 16, i8 32> ; 0x10, 0x20
ret <2 x i8> %r
}
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-grtev4-linux-gnu"
-declare i32 @llvm.annotation.i32(i32, i8*, i8*, i32) #1
+declare i32 @llvm.annotation.i32(i32, ptr, ptr, i32) #1
-define dso_local i32 @annotated(i32* %c) local_unnamed_addr #0 {
+define dso_local i32 @annotated(ptr %c) local_unnamed_addr #0 {
; CHECK-LABEL: @annotated(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[C:%.*]], align 4
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.annotation.i32(i32 [[TMP0]], i8* undef, i8* undef, i32 undef)
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[C:%.*]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.annotation.i32(i32 [[TMP0]], ptr undef, ptr undef, i32 undef)
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP0]]
; CHECK-NEXT: ret i32 [[ADD]]
;
entry:
- %0 = load i32, i32* %c, align 4
- %1 = call i32 @llvm.annotation.i32(i32 %0, i8* undef, i8* undef, i32 undef)
- %2 = load i32, i32* %c, align 4
+ %0 = load i32, ptr %c, align 4
+ %1 = call i32 @llvm.annotation.i32(i32 %0, ptr undef, ptr undef, i32 undef)
+ %2 = load i32, ptr %c, align 4
%add = add nsw i32 %1, %2
ret i32 %add
}
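; Note: as the CHECK lines above show, the second load of %c is expected to
; be CSE'd with the first across the @llvm.annotation call, so the add ends
; up using [[TMP0]] for both operands.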
; Make sure !annotation metadata is added to new instructions, if the source
; instruction has !annotation metadata.
-define i1 @fold_to_new_instruction(i8* %a, i8* %b) {
+define i1 @fold_to_new_instruction(ptr %a, ptr %b) {
; CHECK-LABEL: define {{.+}} @fold_to_new_instruction({{.+}}
-; CHECK-NEXT: [[C:%.*]] = icmp uge i8* [[A:%.*]], [[B:%[a-z]*]], !annotation [[ANN:![0-9]+]]
+; CHECK-NEXT: [[C:%.*]] = icmp uge ptr [[A:%.*]], [[B:%[a-z]*]], !annotation [[ANN:![0-9]+]]
; CHECK-NEXT: ret i1 [[C]]
;
- %a.c = bitcast i8* %a to i32*, !annotation !0
- %b.c = bitcast i8* %b to i32*, !annotation !0
- %c = icmp uge i32* %a.c, %b.c, !annotation !0
+ %c = icmp uge ptr %a, %b, !annotation !0
ret i1 %c
}
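; The !0 node referenced above is elided in this excerpt; an !annotation
; node is simply a tuple of strings, e.g. (assumed shape for illustration,
; not the file's actual definition):
;   !0 = !{!"example annotation"}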
; Make sure !annotation is not added to new instructions if the source
; instruction does not have it (even if some folded operands do have
; !annotation).
-define i1 @fold_to_new_instruction2(i8* %a, i8* %b) {
+define i1 @fold_to_new_instruction2(ptr %a, ptr %b) {
; CHECK-LABEL: define {{.+}} @fold_to_new_instruction2({{.+}}
-; CHECK-NEXT: [[C:%.*]] = icmp uge i8* [[A:%.*]], [[B:%[a-z]+]]
+; CHECK-NEXT: [[C:%.*]] = icmp uge ptr [[A:%.*]], [[B:%[a-z]+]]
; CHECK-NEXT: ret i1 [[C]]
;
- %a.c = bitcast i8* %a to i32*, !annotation !0
- %b.c = bitcast i8* %b to i32*, !annotation !0
- %c = icmp uge i32* %a.c, %b.c
+ %c = icmp uge ptr %a, %b
ret i1 %c
}
; memcpy can be expanded inline with load/store. Verify that we keep the
; !annotation metadata.
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
-define void @copy_1_byte(i8* %d, i8* %s) {
+define void @copy_1_byte(ptr %d, ptr %s) {
; CHECK-LABEL: define {{.+}} @copy_1_byte({{.+}}
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* [[S:%.*]], align 1, !annotation [[ANN]]
-; CHECK-NEXT: store i8 [[TMP1]], i8* [[D:%.*]], align 1, !annotation [[ANN]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[S:%.*]], align 1, !annotation [[ANN]]
+; CHECK-NEXT: store i8 [[TMP1]], ptr [[D:%.*]], align 1, !annotation [[ANN]]
; CHECK-NEXT: ret void
;
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d, i8* %s, i32 1, i1 false), !annotation !0
+ call void @llvm.memcpy.p0.p0.i32(ptr %d, ptr %s, i32 1, i1 false), !annotation !0
ret void
}
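; A hedged sketch (hypothetical function name) of the same expansion at a
; wider size: the pass performs the analogous rewrite for power-of-two
; constant lengths up to 8 bytes, using an integer load/store of matching
; width, and would likewise be expected to carry over the !annotation.
define void @copy_8_bytes_sketch(ptr %d, ptr %s) {
  ; expected result, roughly: %1 = load i64, ptr %s, align 1, !annotation !0
  ;                           store i64 %1, ptr %d, align 1, !annotation !0
  call void @llvm.memcpy.p0.p0.i32(ptr %d, ptr %s, i32 8, i1 false), !annotation !0
  ret void
}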
-declare i8* @memcpy(i8* noalias returned, i8* noalias nocapture readonly, i64) nofree nounwind
+declare ptr @memcpy(ptr noalias returned, ptr noalias nocapture readonly, i64) nofree nounwind
-define void @libcallcopy_1_byte(i8* %d, i8* %s) {
+define void @libcallcopy_1_byte(ptr %d, ptr %s) {
; CHECK-LABEL: define {{.+}} @libcallcopy_1_byte({{.+}}
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* [[S:%.*]], align 1, !annotation [[ANN]]
-; CHECK-NEXT: store i8 [[TMP1]], i8* [[D:%.*]], align 1, !annotation [[ANN]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[S:%.*]], align 1, !annotation [[ANN]]
+; CHECK-NEXT: store i8 [[TMP1]], ptr [[D:%.*]], align 1, !annotation [[ANN]]
; CHECK-NEXT: ret void
;
- call i8* @memcpy(i8* %d, i8* %s, i64 1), !annotation !0
+ call ptr @memcpy(ptr %d, ptr %s, i64 1), !annotation !0
ret void
}
-declare i8* @__memcpy_chk(i8*, i8*, i64, i64) nofree nounwind
+declare ptr @__memcpy_chk(ptr, ptr, i64, i64) nofree nounwind
-define void @libcallcopy_1_byte_chk(i8* %d, i8* %s) {
+define void @libcallcopy_1_byte_chk(ptr %d, ptr %s) {
; CHECK-LABEL: define {{.+}} @libcallcopy_1_byte_chk({{.+}}
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* [[S:%.*]], align 1, !annotation [[ANN]]
-; CHECK-NEXT: store i8 [[TMP1]], i8* [[D:%.*]], align 1, !annotation [[ANN]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[S:%.*]], align 1, !annotation [[ANN]]
+; CHECK-NEXT: store i8 [[TMP1]], ptr [[D:%.*]], align 1, !annotation [[ANN]]
; CHECK-NEXT: ret void
;
- call i8* @__memcpy_chk(i8* %d, i8* %s, i64 1, i64 1), !annotation !0
+ call ptr @__memcpy_chk(ptr %d, ptr %s, i64 1, i64 1), !annotation !0
ret void
}
-declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) nounwind
+declare void @llvm.memmove.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i1) nounwind
-define void @move_1_byte(i8* %d, i8* %s) {
+define void @move_1_byte(ptr %d, ptr %s) {
; CHECK-LABEL: define {{.+}} @move_1_byte({{.+}}
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* [[S:%.*]], align 1, !annotation [[ANN]]
-; CHECK-NEXT: store i8 [[TMP1]], i8* [[D:%.*]], align 1, !annotation [[ANN]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[S:%.*]], align 1, !annotation [[ANN]]
+; CHECK-NEXT: store i8 [[TMP1]], ptr [[D:%.*]], align 1, !annotation [[ANN]]
; CHECK-NEXT: ret void
;
- call void @llvm.memmove.p0i8.p0i8.i32(i8* %d, i8* %s, i32 1, i1 false), !annotation !0
+ call void @llvm.memmove.p0.p0.i32(ptr %d, ptr %s, i32 1, i1 false), !annotation !0
ret void
}
-declare i8* @memmove(i8* returned, i8* nocapture readonly, i64) nofree nounwind
+declare ptr @memmove(ptr returned, ptr nocapture readonly, i64) nofree nounwind
-define void @libcallmove_1_byte(i8* %d, i8* %s) {
+define void @libcallmove_1_byte(ptr %d, ptr %s) {
; CHECK-LABEL: define {{.+}} @libcallmove_1_byte({{.+}}
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* [[S:%.*]], align 1, !annotation [[ANN]]
-; CHECK-NEXT: store i8 [[TMP1]], i8* [[D:%.*]], align 1, !annotation [[ANN]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[S:%.*]], align 1, !annotation [[ANN]]
+; CHECK-NEXT: store i8 [[TMP1]], ptr [[D:%.*]], align 1, !annotation [[ANN]]
; CHECK-NEXT: ret void
;
- call i8* @memmove(i8* %d, i8* %s, i64 1), !annotation !0
+ call ptr @memmove(ptr %d, ptr %s, i64 1), !annotation !0
ret void
}
-declare i8* @__memmove_chk(i8*, i8*, i64, i64) nofree nounwind
+declare ptr @__memmove_chk(ptr, ptr, i64, i64) nofree nounwind
-define void @libcallmove_1_byte_chk(i8* %d, i8* %s) {
+define void @libcallmove_1_byte_chk(ptr %d, ptr %s) {
; CHECK-LABEL: define {{.+}} @libcallmove_1_byte_chk({{.+}}
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* [[S:%.*]], align 1, !annotation [[ANN]]
-; CHECK-NEXT: store i8 [[TMP1]], i8* [[D:%.*]], align 1, !annotation [[ANN]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[S:%.*]], align 1, !annotation [[ANN]]
+; CHECK-NEXT: store i8 [[TMP1]], ptr [[D:%.*]], align 1, !annotation [[ANN]]
; CHECK-NEXT: ret void
;
- call i8* @__memmove_chk(i8* %d, i8* %s, i64 1, i64 1), !annotation !0
+ call ptr @__memmove_chk(ptr %d, ptr %s, i64 1, i64 1), !annotation !0
ret void
}
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1) argmemonly nounwind
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1) argmemonly nounwind
-define void @set_1_byte(i8* %d) {
+define void @set_1_byte(ptr %d) {
; CHECK-LABEL: define {{.+}} @set_1_byte({{.+}}
-; CHECK-NEXT: store i8 1, i8* [[D:%.*]], align 1, !annotation [[ANN]]
+; CHECK-NEXT: store i8 1, ptr [[D:%.*]], align 1, !annotation [[ANN]]
; CHECK-NEXT: ret void
;
- call void @llvm.memset.p0i8.i32(i8* %d, i8 1, i32 1, i1 false), !annotation !0
+ call void @llvm.memset.p0.i32(ptr %d, i8 1, i32 1, i1 false), !annotation !0
ret void
}
-declare i8* @memset(i8*, i32, i64) nofree
+declare ptr @memset(ptr, i32, i64) nofree
-define void @libcall_set_1_byte(i8* %d) {
+define void @libcall_set_1_byte(ptr %d) {
; CHECK-LABEL: define {{.+}} @libcall_set_1_byte({{.+}}
-; CHECK-NEXT: store i8 1, i8* [[D:%.*]], align 1, !annotation [[ANN]]
+; CHECK-NEXT: store i8 1, ptr [[D:%.*]], align 1, !annotation [[ANN]]
; CHECK-NEXT: ret void
;
- call i8* @memset(i8* %d, i32 1, i64 1), !annotation !0
+ call ptr @memset(ptr %d, i32 1, i64 1), !annotation !0
ret void
}
-declare i8* @__memset_chk(i8*, i32, i64, i64) nofree
+declare ptr @__memset_chk(ptr, i32, i64, i64) nofree
-define void @libcall_set_1_byte_chk(i8* %d) {
+define void @libcall_set_1_byte_chk(ptr %d) {
; CHECK-LABEL: define {{.+}} @libcall_set_1_byte_chk({{.+}}
-; CHECK-NEXT: store i8 1, i8* [[D:%.*]], align 1, !annotation [[ANN]]
+; CHECK-NEXT: store i8 1, ptr [[D:%.*]], align 1, !annotation [[ANN]]
; CHECK-NEXT: ret void
;
- call i8* @__memset_chk(i8* %d, i32 1, i64 1, i64 1), !annotation !0
+ call ptr @__memset_chk(ptr %d, i32 1, i64 1, i64 1), !annotation !0
ret void
}
-define i7 @test5(i7 %A, i7* %P) {
+define i7 @test5(i7 %A, ptr %P) {
; CHECK-LABEL: @test5(
; CHECK-NEXT: [[TMP1:%.*]] = and i7 [[A:%.*]], -4
; CHECK-NEXT: [[C:%.*]] = xor i7 [[TMP1]], 15
-; CHECK-NEXT: store i7 [[C]], i7* [[P:%.*]], align 1
+; CHECK-NEXT: store i7 [[C]], ptr [[P:%.*]], align 1
; CHECK-NEXT: ret i7 3
;
%B = or i7 %A, 3
%C = xor i7 %B, 12
- store i7 %C, i7* %P
+ store i7 %C, ptr %P
%r = and i7 %C, 3
ret i7 %r
}
-define i117 @test12(i117 %A, i117* %P) {
+define i117 @test12(i117 %A, ptr %P) {
; CHECK-LABEL: @test12(
; CHECK-NEXT: [[TMP1:%.*]] = and i117 [[A:%.*]], -4
; CHECK-NEXT: [[C:%.*]] = xor i117 [[TMP1]], 15
-; CHECK-NEXT: store i117 [[C]], i117* [[P:%.*]], align 4
+; CHECK-NEXT: store i117 [[C]], ptr [[P:%.*]], align 4
; CHECK-NEXT: ret i117 3
;
%B = or i117 %A, 3
%C = xor i117 %B, 12
- store i117 %C, i117* %P
+ store i117 %C, ptr %P
%r = and i117 %C, 3
ret i117 %r
}
target triple = "i686-pc-linux-gnu"
declare i32 @main2()
-declare i7* @ctime2(i999*)
+declare ptr @ctime2(ptr)
-define i7* @ctime(i999*) {
+define ptr @ctime(ptr) {
; CHECK-LABEL: @ctime(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[I0:%.*]] = call i32 @main2()
-; CHECK-NEXT: [[TMP1:%.*]] = inttoptr i32 [[I0]] to i7*
-; CHECK-NEXT: ret i7* [[TMP1]]
+; CHECK-NEXT: [[TMP1:%.*]] = inttoptr i32 [[I0]] to ptr
+; CHECK-NEXT: ret ptr [[TMP1]]
;
entry:
- %i0 = call i7* bitcast (i32 ()* @main2 to i7* ()*)( )
- ret i7* %i0
+ %i0 = call ptr @main2( )
+ ret ptr %i0
}
define i32 @main() {
; CHECK-LABEL: @main(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[I1:%.*]] = call i7* @ctime2(i999* null)
-; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint i7* [[I1]] to i32
+; CHECK-NEXT: [[I1:%.*]] = call ptr @ctime2(ptr null)
+; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[I1]] to i32
; CHECK-NEXT: ret i32 [[TMP0]]
;
entry:
- %i1 = call i32 bitcast (i7* (i999*)* @ctime2 to i32 (i99*)*)( i99* null )
+ %i1 = call i32 @ctime2( ptr null )
ret i32 %i1
}
define i177 @ossfuzz_9880(i177 %X) {
; CHECK-LABEL: @ossfuzz_9880(
; CHECK-NEXT: [[A:%.*]] = alloca i177, align 8
-; CHECK-NEXT: [[L1:%.*]] = load i177, i177* [[A]], align 8
+; CHECK-NEXT: [[L1:%.*]] = load i177, ptr [[A]], align 8
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i177 [[L1]], -1
; CHECK-NEXT: [[B5_NEG:%.*]] = sext i1 [[TMP1]] to i177
; CHECK-NEXT: [[B14:%.*]] = add i177 [[L1]], [[B5_NEG]]
; CHECK-NEXT: ret i177 [[B1]]
;
%A = alloca i177
- %L1 = load i177, i177* %A
+ %L1 = load i177, ptr %A
%B = or i177 0, -1
%B5 = udiv i177 %L1, %B
%B4 = add i177 %B5, %B
; negative test - the extra use of %sub blocks the one-use fold (see the
; sketch after this function)
-define i32 @lshr_sub_nsw_extra_use(i32 %x, i32 %y, i32* %p) {
+define i32 @lshr_sub_nsw_extra_use(i32 %x, i32 %y, ptr %p) {
; CHECK-LABEL: @lshr_sub_nsw_extra_use(
; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: store i32 [[SUB]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[SUB]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[SUB]], 31
; CHECK-NEXT: ret i32 [[SHR]]
;
%sub = sub nsw i32 %x, %y
- store i32 %sub, i32* %p
+ store i32 %sub, ptr %p
%shr = lshr i32 %sub, 31
ret i32 %shr
}
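; For contrast, a sketch (hypothetical function name) of the positive
; one-use case: with no extra use of %sub, extracting the sign bit of a
; no-signed-wrap subtraction is expected to become a direct comparison,
; roughly `zext (icmp slt i32 %x, %y) to i32`.
define i32 @lshr_sub_nsw_sketch(i32 %x, i32 %y) {
  %sub = sub nsw i32 %x, %y
  %shr = lshr i32 %sub, 31
  ret i32 %shr
}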
; negative test - the extra use of %sub blocks the one-use fold (see the
; sketch after this function)
-define i32 @ashr_sub_nsw_extra_use(i32 %x, i32 %y, i32* %p) {
+define i32 @ashr_sub_nsw_extra_use(i32 %x, i32 %y, ptr %p) {
; CHECK-LABEL: @ashr_sub_nsw_extra_use(
; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: store i32 [[SUB]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[SUB]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[SUB]], 31
; CHECK-NEXT: ret i32 [[SHR]]
;
%sub = sub nsw i32 %x, %y
- store i32 %sub, i32* %p
+ store i32 %sub, ptr %p
%shr = ashr i32 %sub, 31
ret i32 %shr
}
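; Likewise for the arithmetic-shift form (hypothetical function name):
; without the extra use, this is expected to fold to the sign-extended
; comparison, roughly `sext (icmp slt i32 %x, %y) to i32`.
define i32 @ashr_sub_nsw_sketch(i32 %x, i32 %y) {
  %sub = sub nsw i32 %x, %y
  %shr = ashr i32 %sub, 31
  ret i32 %shr
}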
declare void @llvm.assume(i1 noundef)
-define void @f1(i8* %a) {
+define void @f1(ptr %a) {
; CHECK-LABEL: @f1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds i8, i8* [[A:%.*]], i64 4
-; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint i8* [[PTR]] to i64
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 4
+; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[PTR]] to i64
; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 3
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[TMP2]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[PTR]], i64 4) ]
-; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[PTR]] to i32*
-; CHECK-NEXT: store i32 4, i32* [[TMP3]], align 4
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[PTR]], i64 4) ]
+; CHECK-NEXT: store i32 4, ptr [[PTR]], align 4
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
; CHECK-NEXT: ret void
;
entry:
- %ptr = getelementptr inbounds i8, i8* %a, i64 4
- %0 = ptrtoint i8* %ptr to i64
+ %ptr = getelementptr inbounds i8, ptr %a, i64 4
+ %0 = ptrtoint ptr %ptr to i64
%1 = and i64 %0, 3
%2 = icmp eq i64 %1, 0
br i1 %2, label %if.then, label %if.end
if.then: ; preds = %entry
- call void @llvm.assume(i1 true) [ "align"(i8* %ptr, i64 4) ]
- %3 = ptrtoint i8* %ptr to i64
+ call void @llvm.assume(i1 true) [ "align"(ptr %ptr, i64 4) ]
+ %3 = ptrtoint ptr %ptr to i64
%4 = and i64 %3, 3
%5 = icmp eq i64 %4, 0
br i1 %5, label %if.then1, label %if.else1
if.then1: ; preds = %if.then
- %6 = bitcast i8* %ptr to i32*
- store i32 4, i32* %6, align 4
+ store i32 4, ptr %ptr, align 4
br label %if.end
if.else1: ; preds = %if.then
- store i8 1, i8* %ptr, align 1
+ store i8 1, ptr %ptr, align 1
br label %if.end
if.end: ; preds = %if.then1, %if.else1, %entry
; TODO: We could fold away the branch "br i1 %3, ..." either by using a GEP
; or by making getKnowledgeValidInContext aware of the alignment bundle
; offset, together with improved value tracking of GEPs.
-define void @f2(i8* %a) {
+define void @f2(ptr %a) {
; CHECK-LABEL: @f2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[A:%.*]], i64 32, i32 24) ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[A]], i64 8
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i8* [[TMP0]] to i64
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[A:%.*]], i64 32, i32 24) ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 8
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[TMP0]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], 8
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[TMP2]], 0
; CHECK-NEXT: br i1 [[TMP3]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP0]] to i64*
-; CHECK-NEXT: store i64 16, i64* [[TMP4]], align 8
+; CHECK-NEXT: store i64 16, ptr [[TMP0]], align 8
; CHECK-NEXT: br label [[IF_END:%.*]]
; CHECK: if.else:
-; CHECK-NEXT: store i8 1, i8* [[TMP0]], align 8
+; CHECK-NEXT: store i8 1, ptr [[TMP0]], align 8
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
; CHECK-NEXT: ret void
;
entry:
- call void @llvm.assume(i1 true) [ "align"(i8* %a, i64 32, i32 24) ]
- %0 = getelementptr inbounds i8, i8* %a, i64 8
- %1 = ptrtoint i8* %0 to i64
+ call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 32, i32 24) ]
+ %0 = getelementptr inbounds i8, ptr %a, i64 8
+ %1 = ptrtoint ptr %0 to i64
%2 = and i64 %1, 15
%3 = icmp eq i64 %2, 0
br i1 %3, label %if.then, label %if.else
if.then: ; preds = %entry
- %4 = bitcast i8* %0 to i64*
- store i64 16, i64* %4, align 4
+ store i64 16, ptr %0, align 4
br label %if.end
if.else: ; preds = %entry
- store i8 1, i8* %0, align 1
+ store i8 1, ptr %0, align 1
br label %if.end
if.end: ; preds = %if.else, %if.then
ret void
}
-define void @f3(i64 %a, i8* %b) {
+define void @f3(i64 %a, ptr %b) {
; CHECK-LABEL: @f3(
-; CHECK-NEXT: [[C:%.*]] = ptrtoint i8* [[B:%.*]] to i64
-; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[B]], i64 4294967296) ]
+; CHECK-NEXT: [[C:%.*]] = ptrtoint ptr [[B:%.*]] to i64
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[B]], i64 4294967296) ]
; CHECK-NEXT: [[D:%.*]] = add i64 [[C]], [[A:%.*]]
; CHECK-NEXT: call void @g(i64 [[D]])
; CHECK-NEXT: ret void
;
- %c = ptrtoint i8* %b to i64
- call void @llvm.assume(i1 true) [ "align"(i8* %b, i64 4294967296) ]
+ %c = ptrtoint ptr %b to i64
+ call void @llvm.assume(i1 true) [ "align"(ptr %b, i64 4294967296) ]
%d = add i64 %a, %c
call void @g(i64 %d)
  ret void
}
declare void @g(i64)
-define i8 @assume_align_zero(i8* %p) {
+define i8 @assume_align_zero(ptr %p) {
; CHECK-LABEL: @assume_align_zero(
-; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[P:%.*]], i64 0) ]
-; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[P]], align 1
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[P:%.*]], i64 0) ]
+; CHECK-NEXT: [[V:%.*]] = load i8, ptr [[P]], align 1
; CHECK-NEXT: ret i8 [[V]]
;
- call void @llvm.assume(i1 true) [ "align"(i8* %p, i64 0) ]
- %v = load i8, i8* %p
+ call void @llvm.assume(i1 true) [ "align"(ptr %p, i64 0) ]
+ %v = load i8, ptr %p
ret i8 %v
}
-define i8 @assume_align_non_pow2(i8* %p) {
+define i8 @assume_align_non_pow2(ptr %p) {
; CHECK-LABEL: @assume_align_non_pow2(
-; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[P:%.*]], i64 123) ]
-; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[P]], align 1
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[P:%.*]], i64 123) ]
+; CHECK-NEXT: [[V:%.*]] = load i8, ptr [[P]], align 1
; CHECK-NEXT: ret i8 [[V]]
;
- call void @llvm.assume(i1 true) [ "align"(i8* %p, i64 123) ]
- %v = load i8, i8* %p
+ call void @llvm.assume(i1 true) [ "align"(ptr %p, i64 123) ]
+ %v = load i8, ptr %p
ret i8 %v
}
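; For contrast with the two invalid-alignment cases above, a sketch
; (hypothetical function name) of a well-formed power-of-two "align"
; bundle, which the pass can actually use to raise the access alignment,
; as the @f2 test earlier demonstrates for stores:
define i8 @assume_align_pow2_sketch(ptr %p) {
  call void @llvm.assume(i1 true) [ "align"(ptr %p, i64 16) ]
  %v = load i8, ptr %p   ; expected: load i8, ptr %p, align 16
  ret i8 %v
}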
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-define i8* @example(i8* dereferenceable(24) %x) {
+define ptr @example(ptr dereferenceable(24) %x) {
; CHECK-LABEL: @example(
-; CHECK-NEXT: [[X2:%.*]] = bitcast i8* [[X:%.*]] to {}**
-; CHECK-NEXT: [[Y:%.*]] = load {}*, {}** [[X2]], align 8
-; CHECK-NEXT: [[Y_IS_NULL:%.*]] = icmp ne {}* [[Y]], null
+; CHECK-NEXT: [[Y:%.*]] = load ptr, ptr [[X:%.*]], align 8
+; CHECK-NEXT: [[Y_IS_NULL:%.*]] = icmp ne ptr [[Y]], null
; CHECK-NEXT: call void @llvm.assume(i1 [[Y_IS_NULL]])
-; CHECK-NEXT: ret i8* [[X]]
+; CHECK-NEXT: ret ptr [[X]]
;
- %x2 = bitcast i8* %x to {}**
- %y = load {}*, {}** %x2, align 8
- %y_is_null = icmp eq {}* %y, null
+ %y = load ptr, ptr %x, align 8
+ %y_is_null = icmp eq ptr %y, null
- %x0 = getelementptr inbounds i8, i8* %x, i64 0
- %res = select i1 %y_is_null, i8* null, i8* %x0
+ %res = select i1 %y_is_null, ptr null, ptr %x
- %nonnull = icmp ne i8* %res, null
+ %nonnull = icmp ne ptr %res, null
call void @llvm.assume(i1 %nonnull)
- ret i8* %res
+ ret ptr %res
}
-; TODO: this should be folded to `ret i8* %x` as well.
-define i8* @example2(i8* %x) {
+; TODO: this should be folded to `ret ptr %x` as well.
+define ptr @example2(ptr %x) {
; CHECK-LABEL: @example2(
-; CHECK-NEXT: [[X2:%.*]] = bitcast i8* [[X:%.*]] to {}**
-; CHECK-NEXT: [[Y:%.*]] = load {}*, {}** [[X2]], align 8
-; CHECK-NEXT: [[Y_IS_NULL:%.*]] = icmp eq {}* [[Y]], null
-; CHECK-NEXT: [[RES:%.*]] = select i1 [[Y_IS_NULL]], i8* null, i8* [[X]]
-; CHECK-NEXT: [[NONNULL:%.*]] = icmp ne i8* [[RES]], null
+; CHECK-NEXT: [[Y:%.*]] = load ptr, ptr [[X:%.*]], align 8
+; CHECK-NEXT: [[Y_IS_NULL:%.*]] = icmp eq ptr [[Y]], null
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[Y_IS_NULL]], ptr null, ptr [[X]]
+; CHECK-NEXT: [[NONNULL:%.*]] = icmp ne ptr [[RES]], null
; CHECK-NEXT: call void @llvm.assume(i1 [[NONNULL]])
-; CHECK-NEXT: ret i8* [[RES]]
+; CHECK-NEXT: ret ptr [[RES]]
;
- %x2 = bitcast i8* %x to {}**
- %y = load {}*, {}** %x2, align 8
- %y_is_null = icmp eq {}* %y, null
+ %y = load ptr, ptr %x, align 8
+ %y_is_null = icmp eq ptr %y, null
- %x0 = getelementptr inbounds i8, i8* %x, i64 0
- %res = select i1 %y_is_null, i8* null, i8* %x0
+ %res = select i1 %y_is_null, ptr null, ptr %x
- %nonnull = icmp ne i8* %res, null
+ %nonnull = icmp ne ptr %res, null
call void @llvm.assume(i1 %nonnull)
- ret i8* %res
+ ret ptr %res
}
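; A plausible reading of the difference between the two tests: in @example
; the dereferenceable(24) on %x implies %x is nonnull, so the assume on
; %res pins the select result to %x; @example2 lacks that attribute, and
; per the TODO above its fold is still missing.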
declare void @llvm.assume(i1)
target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind uwtable
-define void @foo(i32* %a, i32* %b) #0 {
+define void @foo(ptr %a, ptr %b) #0 {
; CHECK-LABEL: @foo(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A:%.*]] to i64
+; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint ptr [[A:%.*]] to i64
; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
-; CHECK-NEXT: [[PTRINT1:%.*]] = ptrtoint i32* [[B:%.*]] to i64
+; CHECK-NEXT: [[PTRINT1:%.*]] = ptrtoint ptr [[B:%.*]] to i64
; CHECK-NEXT: [[MASKEDPTR2:%.*]] = and i64 [[PTRINT1]], 63
; CHECK-NEXT: [[MASKCOND3:%.*]] = icmp eq i64 [[MASKEDPTR2]], 0
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND3]])
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 64
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
-; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX5]], align 64
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX5]], align 64
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 16
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP1]], 1648
; CHECK-NEXT: ret void
;
entry:
- %ptrint = ptrtoint i32* %a to i64
+ %ptrint = ptrtoint ptr %a to i64
%maskedptr = and i64 %ptrint, 63
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
- %ptrint1 = ptrtoint i32* %b to i64
+ %ptrint1 = ptrtoint ptr %b to i64
%maskedptr2 = and i64 %ptrint1, 63
%maskcond3 = icmp eq i64 %maskedptr2, 0
  tail call void @llvm.assume(i1 %maskcond3)
  br label %for.body
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %b, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, 1
- %arrayidx5 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- store i32 %add, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ store i32 %add, ptr %arrayidx5, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 16
%1 = trunc i64 %indvars.iv.next to i32
  %cmp = icmp slt i32 %1, 1648
  br i1 %cmp, label %for.body, label %for.end
for.end:                                          ; preds = %for.body
  ret void
}
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-%struct.s = type { double* }
+%struct.s = type { ptr }
; Function Attrs: nounwind uwtable
-define void @_Z3fooR1s(%struct.s* nocapture readonly dereferenceable(8) %x) #0 {
+define void @_Z3fooR1s(ptr nocapture readonly dereferenceable(8) %x) #0 {
; CHECK-LABEL: @_Z3fooR1s
; CHECK: call void @llvm.assume
; CHECK-NOT: call void @llvm.assume
entry:
- %a = getelementptr inbounds %struct.s, %struct.s* %x, i64 0, i32 0
- %0 = load double*, double** %a, align 8
- %ptrint = ptrtoint double* %0 to i64
+ %0 = load ptr, ptr %x, align 8
+ %ptrint = ptrtoint ptr %0 to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next.1, %for.body ]
tail call void @llvm.assume(i1 %maskcond)
- %arrayidx = getelementptr inbounds double, double* %0, i64 %indvars.iv
- %1 = load double, double* %arrayidx, align 16
+ %arrayidx = getelementptr inbounds double, ptr %0, i64 %indvars.iv
+ %1 = load double, ptr %arrayidx, align 16
%add = fadd double %1, 1.000000e+00
tail call void @llvm.assume(i1 %maskcond)
%mul = fmul double %add, 2.000000e+00
- store double %mul, double* %arrayidx, align 16
+ store double %mul, ptr %arrayidx, align 16
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
tail call void @llvm.assume(i1 %maskcond)
- %arrayidx.1 = getelementptr inbounds double, double* %0, i64 %indvars.iv.next
- %2 = load double, double* %arrayidx.1, align 8
+ %arrayidx.1 = getelementptr inbounds double, ptr %0, i64 %indvars.iv.next
+ %2 = load double, ptr %arrayidx.1, align 8
%add.1 = fadd double %2, 1.000000e+00
tail call void @llvm.assume(i1 %maskcond)
%mul.1 = fmul double %add.1, 2.000000e+00
- store double %mul.1, double* %arrayidx.1, align 8
+ store double %mul.1, ptr %arrayidx.1, align 8
%indvars.iv.next.1 = add nuw nsw i64 %indvars.iv.next, 1
%exitcond.1 = icmp eq i64 %indvars.iv.next, 1599
br i1 %exitcond.1, label %for.end, label %for.body
for.end:                                          ; preds = %for.body
  ret void
}
-declare align 8 i8* @get()
+declare align 8 ptr @get()
; Check that redundant align assume is removed
; CHECK-LABEL: @test
; CHECK-NOT: call void @llvm.assume
define void @test1() {
- %p = call align 8 i8* @get()
- %ptrint = ptrtoint i8* %p to i64
+ %p = call align 8 ptr @get()
+ %ptrint = ptrtoint ptr %p to i64
%maskedptr = and i64 %ptrint, 7
%maskcond = icmp eq i64 %maskedptr, 0
  call void @llvm.assume(i1 %maskcond)
  ret void
}
; CHECK-NOT: call void @llvm.assume
define void @test3() {
%p = alloca i8, align 8
- %ptrint = ptrtoint i8* %p to i64
+ %ptrint = ptrtoint ptr %p to i64
%maskedptr = and i64 %ptrint, 7
%maskcond = icmp eq i64 %maskedptr, 0
  call void @llvm.assume(i1 %maskcond)
  ret void
}
; Check that the alignment has been upgraded and that the assume has not
; been removed:
-define i32 @foo1(i32* %a) #0 {
+define i32 @foo1(ptr %a) #0 {
; DEFAULT-LABEL: @foo1(
-; DEFAULT-NEXT: [[T0:%.*]] = load i32, i32* [[A:%.*]], align 32
-; DEFAULT-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
+; DEFAULT-NEXT: [[T0:%.*]] = load i32, ptr [[A:%.*]], align 32
+; DEFAULT-NEXT: [[PTRINT:%.*]] = ptrtoint ptr [[A]] to i64
; DEFAULT-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
; DEFAULT-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; DEFAULT-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; DEFAULT-NEXT: ret i32 [[T0]]
;
; BUNDLES-LABEL: @foo1(
-; BUNDLES-NEXT: [[T0:%.*]] = load i32, i32* [[A:%.*]], align 32
-; BUNDLES-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]
+; BUNDLES-NEXT: [[T0:%.*]] = load i32, ptr [[A:%.*]], align 32
+; BUNDLES-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 32) ]
; BUNDLES-NEXT: ret i32 [[T0]]
;
- %t0 = load i32, i32* %a, align 4
- %ptrint = ptrtoint i32* %a to i64
+ %t0 = load i32, ptr %a, align 4
+ %ptrint = ptrtoint ptr %a to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
  tail call void @llvm.assume(i1 %maskcond)
  ret i32 %t0
}
; Same check as in @foo1, but make sure it works if the assume is first too.
-define i32 @foo2(i32* %a) #0 {
+define i32 @foo2(ptr %a) #0 {
; DEFAULT-LABEL: @foo2(
-; DEFAULT-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A:%.*]] to i64
+; DEFAULT-NEXT: [[PTRINT:%.*]] = ptrtoint ptr [[A:%.*]] to i64
; DEFAULT-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
; DEFAULT-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; DEFAULT-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
-; DEFAULT-NEXT: [[T0:%.*]] = load i32, i32* [[A]], align 32
+; DEFAULT-NEXT: [[T0:%.*]] = load i32, ptr [[A]], align 32
; DEFAULT-NEXT: ret i32 [[T0]]
;
; BUNDLES-LABEL: @foo2(
-; BUNDLES-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A:%.*]], i64 32) ]
-; BUNDLES-NEXT: [[T0:%.*]] = load i32, i32* [[A]], align 32
+; BUNDLES-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[A:%.*]], i64 32) ]
+; BUNDLES-NEXT: [[T0:%.*]] = load i32, ptr [[A]], align 32
; BUNDLES-NEXT: ret i32 [[T0]]
;
- %ptrint = ptrtoint i32* %a to i64
+ %ptrint = ptrtoint ptr %a to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
- %t0 = load i32, i32* %a, align 4
+ %t0 = load i32, ptr %a, align 4
ret i32 %t0
}
-declare void @escape(i32* %a)
+declare void @escape(ptr %a)
; Canonicalize a nonnull assumption on a load into metadata form.
-define i32 @bundle1(i32* %P) {
+define i32 @bundle1(ptr %P) {
; CHECK-LABEL: @bundle1(
-; CHECK-NEXT: tail call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P:%.*]]) ]
-; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT: tail call void @llvm.assume(i1 true) [ "nonnull"(ptr [[P:%.*]]) ]
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NEXT: ret i32 [[LOAD]]
;
- tail call void @llvm.assume(i1 true) ["nonnull"(i32* %P)]
- %load = load i32, i32* %P
+ tail call void @llvm.assume(i1 true) ["nonnull"(ptr %P)]
+ %load = load i32, ptr %P
ret i32 %load
}
-define i32 @bundle2(i32* %P) {
+define i32 @bundle2(ptr %P) {
; CHECK-LABEL: @bundle2(
-; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[P:%.*]], align 4
; CHECK-NEXT: ret i32 [[LOAD]]
;
- tail call void @llvm.assume(i1 true) ["ignore"(i32* undef)]
- %load = load i32, i32* %P
+ tail call void @llvm.assume(i1 true) ["ignore"(ptr undef)]
+ %load = load i32, ptr %P
ret i32 %load
}
-define i1 @nonnull1(i32** %a) {
+define i1 @nonnull1(ptr %a) {
; CHECK-LABEL: @nonnull1(
-; CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8, !nonnull !6
-; CHECK-NEXT: tail call void @escape(i32* nonnull [[LOAD]])
+; CHECK-NEXT: [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8, !nonnull !6
+; CHECK-NEXT: tail call void @escape(ptr nonnull [[LOAD]])
; CHECK-NEXT: ret i1 false
;
- %load = load i32*, i32** %a
- %cmp = icmp ne i32* %load, null
+ %load = load ptr, ptr %a
+ %cmp = icmp ne ptr %load, null
tail call void @llvm.assume(i1 %cmp)
- tail call void @escape(i32* %load)
- %rval = icmp eq i32* %load, null
+ tail call void @escape(ptr %load)
+ %rval = icmp eq ptr %load, null
ret i1 %rval
}
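; A minimal follow-up sketch (hypothetical function name and metadata id,
; not from the original file): once the load carries !nonnull metadata, a
; later null check folds away with no assume left in the IR.
define i1 @nonnull1_meta(ptr %a) {
  %load = load ptr, ptr %a, align 8, !nonnull !99
  %rval = icmp eq ptr %load, null ; known false from the metadata
  ret i1 %rval
}
!99 = !{}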
; Make sure the above canonicalization applies only
; to pointer types. Doing otherwise would be illegal.
-define i1 @nonnull2(i32* %a) {
+define i1 @nonnull2(ptr %a) {
; CHECK-LABEL: @nonnull2(
-; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[A:%.*]], align 4
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[A:%.*]], align 4
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[LOAD]], 0
; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: ret i1 false
;
- %load = load i32, i32* %a
+ %load = load i32, ptr %a
%cmp = icmp ne i32 %load, 0
tail call void @llvm.assume(i1 %cmp)
%rval = icmp eq i32 %load, 0
; Make sure the above canonicalization does not trigger
; if the assume is control-dependent on something else.
-define i1 @nonnull3(i32** %a, i1 %control) {
+define i1 @nonnull3(ptr %a, i1 %control) {
; FIXME: in the BUNDLES version we could duplicate the load and keep the assume nonnull.
; DEFAULT-LABEL: @nonnull3(
; DEFAULT-NEXT: entry:
-; DEFAULT-NEXT: [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
-; DEFAULT-NEXT: [[CMP:%.*]] = icmp ne i32* [[LOAD]], null
+; DEFAULT-NEXT: [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
+; DEFAULT-NEXT: [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; DEFAULT-NEXT: br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; DEFAULT: taken:
; DEFAULT-NEXT: tail call void @llvm.assume(i1 [[CMP]])
; DEFAULT-NEXT: ret i1 false
; DEFAULT: not_taken:
-; DEFAULT-NEXT: [[RVAL_2:%.*]] = icmp sgt i32* [[LOAD]], null
+; DEFAULT-NEXT: [[RVAL_2:%.*]] = icmp sgt ptr [[LOAD]], null
; DEFAULT-NEXT: ret i1 [[RVAL_2]]
;
; BUNDLES-LABEL: @nonnull3(
; BUNDLES: taken:
; BUNDLES-NEXT: ret i1 false
; BUNDLES: not_taken:
-; BUNDLES-NEXT: [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
-; BUNDLES-NEXT: [[RVAL_2:%.*]] = icmp sgt i32* [[LOAD]], null
+; BUNDLES-NEXT: [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
+; BUNDLES-NEXT: [[RVAL_2:%.*]] = icmp sgt ptr [[LOAD]], null
; BUNDLES-NEXT: ret i1 [[RVAL_2]]
;
entry:
- %load = load i32*, i32** %a
- %cmp = icmp ne i32* %load, null
+ %load = load ptr, ptr %a
+ %cmp = icmp ne ptr %load, null
br i1 %control, label %taken, label %not_taken
taken:
tail call void @llvm.assume(i1 %cmp)
- %rval = icmp eq i32* %load, null
+ %rval = icmp eq ptr %load, null
ret i1 %rval
not_taken:
- %rval.2 = icmp sgt i32* %load, null
+ %rval.2 = icmp sgt ptr %load, null
ret i1 %rval.2
}
; Make sure the above canonicalization does not trigger
; if the path from the load to the assume is potentially
; interrupted by an exception being thrown.
-define i1 @nonnull4(i32** %a) {
+define i1 @nonnull4(ptr %a) {
; DEFAULT-LABEL: @nonnull4(
-; DEFAULT-NEXT: [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
-; DEFAULT-NEXT: tail call void @escape(i32* [[LOAD]])
-; DEFAULT-NEXT: [[CMP:%.*]] = icmp ne i32* [[LOAD]], null
+; DEFAULT-NEXT: [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
+; DEFAULT-NEXT: tail call void @escape(ptr [[LOAD]])
+; DEFAULT-NEXT: [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; DEFAULT-NEXT: tail call void @llvm.assume(i1 [[CMP]])
; DEFAULT-NEXT: ret i1 false
;
; BUNDLES-LABEL: @nonnull4(
-; BUNDLES-NEXT: [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
-; BUNDLES-NEXT: tail call void @escape(i32* [[LOAD]])
-; BUNDLES-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[LOAD]]) ]
+; BUNDLES-NEXT: [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
+; BUNDLES-NEXT: tail call void @escape(ptr [[LOAD]])
+; BUNDLES-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(ptr [[LOAD]]) ]
; BUNDLES-NEXT: ret i1 false
;
- %load = load i32*, i32** %a
+ %load = load ptr, ptr %a
;; This call may throw!
- tail call void @escape(i32* %load)
- %cmp = icmp ne i32* %load, null
+ tail call void @escape(ptr %load)
+ %cmp = icmp ne ptr %load, null
tail call void @llvm.assume(i1 %cmp)
- %rval = icmp eq i32* %load, null
+ %rval = icmp eq ptr %load, null
ret i1 %rval
}
-define i1 @nonnull5(i32** %a) {
+define i1 @nonnull5(ptr %a) {
; CHECK-LABEL: @nonnull5(
-; CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
-; CHECK-NEXT: tail call void @escape(i32* [[LOAD]])
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32* [[LOAD]], null
+; CHECK-NEXT: [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
+; CHECK-NEXT: tail call void @escape(ptr [[LOAD]])
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt ptr [[LOAD]], null
; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: ret i1 false
;
- %load = load i32*, i32** %a
+ %load = load ptr, ptr %a
;; This call may throw!
- tail call void @escape(i32* %load)
- %integral = ptrtoint i32* %load to i64
+ tail call void @escape(ptr %load)
+ %integral = ptrtoint ptr %load to i64
%cmp = icmp slt i64 %integral, 0
tail call void @llvm.assume(i1 %cmp) ; %load has at least highest bit set
- %rval = icmp eq i32* %load, null
+ %rval = icmp eq ptr %load, null
ret i1 %rval
}
ret i32 %t2
}
-define i1 @nonnull3A(i32** %a, i1 %control) {
+define i1 @nonnull3A(ptr %a, i1 %control) {
; DEFAULT-LABEL: @nonnull3A(
; DEFAULT-NEXT: entry:
-; DEFAULT-NEXT: [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
+; DEFAULT-NEXT: [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; DEFAULT-NEXT: br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; DEFAULT: taken:
-; DEFAULT-NEXT: [[CMP:%.*]] = icmp ne i32* [[LOAD]], null
+; DEFAULT-NEXT: [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; DEFAULT-NEXT: call void @llvm.assume(i1 [[CMP]])
; DEFAULT-NEXT: ret i1 true
; DEFAULT: not_taken:
-; DEFAULT-NEXT: [[RVAL_2:%.*]] = icmp sgt i32* [[LOAD]], null
+; DEFAULT-NEXT: [[RVAL_2:%.*]] = icmp sgt ptr [[LOAD]], null
; DEFAULT-NEXT: ret i1 [[RVAL_2]]
;
; BUNDLES-LABEL: @nonnull3A(
; BUNDLES: taken:
; BUNDLES-NEXT: ret i1 true
; BUNDLES: not_taken:
-; BUNDLES-NEXT: [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
-; BUNDLES-NEXT: [[RVAL_2:%.*]] = icmp sgt i32* [[LOAD]], null
+; BUNDLES-NEXT: [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
+; BUNDLES-NEXT: [[RVAL_2:%.*]] = icmp sgt ptr [[LOAD]], null
; BUNDLES-NEXT: ret i1 [[RVAL_2]]
;
entry:
- %load = load i32*, i32** %a
- %cmp = icmp ne i32* %load, null
+ %load = load ptr, ptr %a
+ %cmp = icmp ne ptr %load, null
br i1 %control, label %taken, label %not_taken
taken:
call void @llvm.assume(i1 %cmp)
ret i1 %cmp
not_taken:
call void @llvm.assume(i1 %cmp)
- %rval.2 = icmp sgt i32* %load, null
+ %rval.2 = icmp sgt ptr %load, null
ret i1 %rval.2
}
-define i1 @nonnull3B(i32** %a, i1 %control) {
+define i1 @nonnull3B(ptr %a, i1 %control) {
; CHECK-LABEL: @nonnull3B(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; CHECK: taken:
-; CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32* [[LOAD]], null
-; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]]) [ "nonnull"(i32* [[LOAD]]) ]
+; CHECK-NEXT: [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
+; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]]) [ "nonnull"(ptr [[LOAD]]) ]
; CHECK-NEXT: ret i1 true
; CHECK: not_taken:
; CHECK-NEXT: ret i1 [[CONTROL]]
;
entry:
- %load = load i32*, i32** %a
- %cmp = icmp ne i32* %load, null
+ %load = load ptr, ptr %a
+ %cmp = icmp ne ptr %load, null
br i1 %control, label %taken, label %not_taken
taken:
- call void @llvm.assume(i1 %cmp) ["nonnull"(i32* %load)]
+ call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)]
ret i1 %cmp
not_taken:
- call void @llvm.assume(i1 %cmp) ["nonnull"(i32* %load)]
+ call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)]
ret i1 %control
}
declare i1 @tmp1(i1)
-define i1 @nonnull3C(i32** %a, i1 %control) {
+define i1 @nonnull3C(ptr %a, i1 %control) {
; CHECK-LABEL: @nonnull3C(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; CHECK: taken:
-; CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32* [[LOAD]], null
+; CHECK-NEXT: [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; CHECK-NEXT: [[CMP2:%.*]] = call i1 @tmp1(i1 [[CMP]])
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret i1 [[CONTROL]]
;
entry:
- %load = load i32*, i32** %a
- %cmp = icmp ne i32* %load, null
+ %load = load ptr, ptr %a
+ %cmp = icmp ne ptr %load, null
br i1 %control, label %taken, label %not_taken
taken:
%cmp2 = call i1 @tmp1(i1 %cmp)
br label %exit
exit:
; FIXME: this shouldn't be dropped because it is still dominated by the new position of %load
- call void @llvm.assume(i1 %cmp) ["nonnull"(i32* %load)]
+ call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)]
ret i1 %cmp2
not_taken:
call void @llvm.assume(i1 %cmp)
ret i1 %control
}
-define i1 @nonnull3D(i32** %a, i1 %control) {
+define i1 @nonnull3D(ptr %a, i1 %control) {
; CHECK-LABEL: @nonnull3D(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; CHECK: taken:
-; CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32* [[LOAD]], null
+; CHECK-NEXT: [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; CHECK-NEXT: [[CMP2:%.*]] = call i1 @tmp1(i1 [[CMP]])
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret i1 [[CONTROL]]
;
entry:
- %load = load i32*, i32** %a
- %cmp = icmp ne i32* %load, null
+ %load = load ptr, ptr %a
+ %cmp = icmp ne ptr %load, null
br i1 %control, label %taken, label %not_taken
taken:
%cmp2 = call i1 @tmp1(i1 %cmp)
exit:
ret i1 %cmp2
not_taken:
- call void @llvm.assume(i1 %cmp) ["nonnull"(i32* %load)]
+ call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)]
ret i1 %control
}
define i64 @PR31809() {
; CHECK-LABEL: @PR31809(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[T1:%.*]] = ptrtoint i32* [[A]] to i64
+; CHECK-NEXT: [[T1:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: call void @llvm.assume(i1 false)
; CHECK-NEXT: ret i64 [[T1]]
;
%a = alloca i32
- %t1 = ptrtoint i32* %a to i64
+ %t1 = ptrtoint ptr %a to i64
%cond = icmp eq i64 %t1, 3
call void @llvm.assume(i1 %cond)
ret i64 %t1
unreachable
}
-define i32 @unreachable_assumes_and_store(i32 %x, i32 %y, i32* %p) {
+define i32 @unreachable_assumes_and_store(i32 %x, i32 %y, ptr %p) {
; CHECK-LABEL: @unreachable_assumes_and_store(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP0:%.*]] = icmp sgt i32 [[X:%.*]], 1
tail call void @llvm.assume(i1 %cmp4)
%cmp5 = icmp ugt i32 %y, 42
tail call void @llvm.assume(i1 %cmp5)
- store i32 %x, i32* %p
+ store i32 %x, ptr %p
unreachable
}
-define i32 @unreachable_assumes_and_store_logical(i32 %x, i32 %y, i32* %p) {
+define i32 @unreachable_assumes_and_store_logical(i32 %x, i32 %y, ptr %p) {
; CHECK-LABEL: @unreachable_assumes_and_store_logical(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP0:%.*]] = icmp sgt i32 [[X:%.*]], 1
tail call void @llvm.assume(i1 %cmp4)
%cmp5 = icmp ugt i32 %y, 42
tail call void @llvm.assume(i1 %cmp5)
- store i32 %x, i32* %p
+ store i32 %x, ptr %p
unreachable
}
-define void @canonicalize_assume(i32* %0) {
+define void @canonicalize_assume(ptr %0) {
; DEFAULT-LABEL: @canonicalize_assume(
-; DEFAULT-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[TMP0:%.*]], i64 2
-; DEFAULT-NEXT: [[TMP3:%.*]] = bitcast i32* [[TMP2]] to i8*
-; DEFAULT-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP3]], i64 16) ]
+; DEFAULT-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP0:%.*]], i64 2
+; DEFAULT-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP2]], i64 16) ]
; DEFAULT-NEXT: ret void
;
; BUNDLES-LABEL: @canonicalize_assume(
-; BUNDLES-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[TMP0:%.*]], i64 8) ]
+; BUNDLES-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP0:%.*]], i64 8) ]
; BUNDLES-NEXT: ret void
;
- %2 = getelementptr inbounds i32, i32* %0, i64 2
- %3 = bitcast i32* %2 to i8*
- call void @llvm.assume(i1 true) [ "align"(i8* %3, i64 16) ]
+ %2 = getelementptr inbounds i32, ptr %0, i64 2
+ call void @llvm.assume(i1 true) [ "align"(ptr %2, i64 16) ]
ret void
}
; Check transforms involving atomic operations
-define i32 @test1(i32* %p) {
+define i32 @test1(ptr %p) {
; CHECK-LABEL: @test1(
-; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* [[P:%.*]] seq_cst, align 4
+; CHECK-NEXT: [[X:%.*]] = load atomic i32, ptr [[P:%.*]] seq_cst, align 4
; CHECK-NEXT: [[Z:%.*]] = shl i32 [[X]], 1
; CHECK-NEXT: ret i32 [[Z]]
;
- %x = load atomic i32, i32* %p seq_cst, align 4
- %y = load i32, i32* %p, align 4
+ %x = load atomic i32, ptr %p seq_cst, align 4
+ %y = load i32, ptr %p, align 4
%z = add i32 %x, %y
ret i32 %z
}
-define i32 @test2(i32* %p) {
+define i32 @test2(ptr %p) {
; CHECK-LABEL: @test2(
-; CHECK-NEXT: [[X:%.*]] = load volatile i32, i32* [[P:%.*]], align 4
-; CHECK-NEXT: [[Y:%.*]] = load volatile i32, i32* [[P]], align 4
+; CHECK-NEXT: [[X:%.*]] = load volatile i32, ptr [[P:%.*]], align 4
+; CHECK-NEXT: [[Y:%.*]] = load volatile i32, ptr [[P]], align 4
; CHECK-NEXT: [[Z:%.*]] = add i32 [[X]], [[Y]]
; CHECK-NEXT: ret i32 [[Z]]
;
- %x = load volatile i32, i32* %p, align 4
- %y = load volatile i32, i32* %p, align 4
+ %x = load volatile i32, ptr %p, align 4
+ %y = load volatile i32, ptr %p, align 4
%z = add i32 %x, %y
ret i32 %z
}
; The exact semantics of mixing volatile and non-volatile on the same
; memory location are a bit unclear, but conservatively, we know we don't
; want to remove the volatile.
-define i32 @test3(i32* %p) {
+define i32 @test3(ptr %p) {
; CHECK-LABEL: @test3(
-; CHECK-NEXT: [[X:%.*]] = load volatile i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT: [[X:%.*]] = load volatile i32, ptr [[P:%.*]], align 4
; CHECK-NEXT: [[Z:%.*]] = shl i32 [[X]], 1
; CHECK-NEXT: ret i32 [[Z]]
;
- %x = load volatile i32, i32* %p, align 4
- %y = load i32, i32* %p, align 4
+ %x = load volatile i32, ptr %p, align 4
+ %y = load i32, ptr %p, align 4
%z = add i32 %x, %y
ret i32 %z
}
; Forwarding from a stronger ordered atomic is fine
-define i32 @test4(i32* %p) {
+define i32 @test4(ptr %p) {
; CHECK-LABEL: @test4(
-; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* [[P:%.*]] seq_cst, align 4
+; CHECK-NEXT: [[X:%.*]] = load atomic i32, ptr [[P:%.*]] seq_cst, align 4
; CHECK-NEXT: [[Z:%.*]] = shl i32 [[X]], 1
; CHECK-NEXT: ret i32 [[Z]]
;
- %x = load atomic i32, i32* %p seq_cst, align 4
- %y = load atomic i32, i32* %p unordered, align 4
+ %x = load atomic i32, ptr %p seq_cst, align 4
+ %y = load atomic i32, ptr %p unordered, align 4
%z = add i32 %x, %y
ret i32 %z
}
; Forwarding from a non-atomic is not. (The earlier load
; could in principle be promoted to atomic and then forwarded,
; but we can't just drop the atomic from the load.)
-define i32 @test5(i32* %p) {
+define i32 @test5(ptr %p) {
; CHECK-LABEL: @test5(
-; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* [[P:%.*]] unordered, align 4
+; CHECK-NEXT: [[X:%.*]] = load atomic i32, ptr [[P:%.*]] unordered, align 4
; CHECK-NEXT: [[Z:%.*]] = shl i32 [[X]], 1
; CHECK-NEXT: ret i32 [[Z]]
;
- %x = load atomic i32, i32* %p unordered, align 4
- %y = load i32, i32* %p, align 4
+ %x = load atomic i32, ptr %p unordered, align 4
+ %y = load i32, ptr %p, align 4
%z = add i32 %x, %y
ret i32 %z
}
; Forwarding atomic to atomic is fine
-define i32 @test6(i32* %p) {
+define i32 @test6(ptr %p) {
; CHECK-LABEL: @test6(
-; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* [[P:%.*]] unordered, align 4
+; CHECK-NEXT: [[X:%.*]] = load atomic i32, ptr [[P:%.*]] unordered, align 4
; CHECK-NEXT: [[Z:%.*]] = shl i32 [[X]], 1
; CHECK-NEXT: ret i32 [[Z]]
;
- %x = load atomic i32, i32* %p unordered, align 4
- %y = load atomic i32, i32* %p unordered, align 4
+ %x = load atomic i32, ptr %p unordered, align 4
+ %y = load atomic i32, ptr %p unordered, align 4
%z = add i32 %x, %y
ret i32 %z
}
; FIXME: we currently don't do anything for monotonic
-define i32 @test7(i32* %p) {
+define i32 @test7(ptr %p) {
; CHECK-LABEL: @test7(
-; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* [[P:%.*]] seq_cst, align 4
-; CHECK-NEXT: [[Y:%.*]] = load atomic i32, i32* [[P]] monotonic, align 4
+; CHECK-NEXT: [[X:%.*]] = load atomic i32, ptr [[P:%.*]] seq_cst, align 4
+; CHECK-NEXT: [[Y:%.*]] = load atomic i32, ptr [[P]] monotonic, align 4
; CHECK-NEXT: [[Z:%.*]] = add i32 [[X]], [[Y]]
; CHECK-NEXT: ret i32 [[Z]]
;
- %x = load atomic i32, i32* %p seq_cst, align 4
- %y = load atomic i32, i32* %p monotonic, align 4
+ %x = load atomic i32, ptr %p seq_cst, align 4
+ %y = load atomic i32, ptr %p monotonic, align 4
%z = add i32 %x, %y
ret i32 %z
}
; FIXME: We could forward in racy code
-define i32 @test8(i32* %p) {
+define i32 @test8(ptr %p) {
; CHECK-LABEL: @test8(
-; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* [[P:%.*]] seq_cst, align 4
-; CHECK-NEXT: [[Y:%.*]] = load atomic i32, i32* [[P]] acquire, align 4
+; CHECK-NEXT: [[X:%.*]] = load atomic i32, ptr [[P:%.*]] seq_cst, align 4
+; CHECK-NEXT: [[Y:%.*]] = load atomic i32, ptr [[P]] acquire, align 4
; CHECK-NEXT: [[Z:%.*]] = add i32 [[X]], [[Y]]
; CHECK-NEXT: ret i32 [[Z]]
;
- %x = load atomic i32, i32* %p seq_cst, align 4
- %y = load atomic i32, i32* %p acquire, align 4
+ %x = load atomic i32, ptr %p seq_cst, align 4
+ %y = load atomic i32, ptr %p acquire, align 4
%z = add i32 %x, %y
ret i32 %z
}
; An unordered access to null is still unreachable; there's no
; ordering imposed.
define i32 @test9() {
; CHECK-LABEL: @test9(
-; CHECK-NEXT: store i32 poison, i32* null, align 4294967296
+; CHECK-NEXT: store i32 poison, ptr null, align 4294967296
; CHECK-NEXT: ret i32 poison
;
- %x = load atomic i32, i32* null unordered, align 4
+ %x = load atomic i32, ptr null unordered, align 4
ret i32 %x
}
define i32 @test9_no_null_opt() #0 {
; CHECK-LABEL: @test9_no_null_opt(
-; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* null unordered, align 4294967296
+; CHECK-NEXT: [[X:%.*]] = load atomic i32, ptr null unordered, align 4294967296
; CHECK-NEXT: ret i32 [[X]]
;
- %x = load atomic i32, i32* null unordered, align 4
+ %x = load atomic i32, ptr null unordered, align 4
ret i32 %x
}
; FIXME: Could also fold
define i32 @test10() {
; CHECK-LABEL: @test10(
-; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* null monotonic, align 4294967296
+; CHECK-NEXT: [[X:%.*]] = load atomic i32, ptr null monotonic, align 4294967296
; CHECK-NEXT: ret i32 [[X]]
;
- %x = load atomic i32, i32* null monotonic, align 4
+ %x = load atomic i32, ptr null monotonic, align 4
ret i32 %x
}
define i32 @test10_no_null_opt() #0 {
; CHECK-LABEL: @test10_no_null_opt(
-; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* null monotonic, align 4294967296
+; CHECK-NEXT: [[X:%.*]] = load atomic i32, ptr null monotonic, align 4294967296
; CHECK-NEXT: ret i32 [[X]]
;
- %x = load atomic i32, i32* null monotonic, align 4
+ %x = load atomic i32, ptr null monotonic, align 4
ret i32 %x
}
; Would this be legal to fold? Probably?
define i32 @test11() {
; CHECK-LABEL: @test11(
-; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* null seq_cst, align 4294967296
+; CHECK-NEXT: [[X:%.*]] = load atomic i32, ptr null seq_cst, align 4294967296
; CHECK-NEXT: ret i32 [[X]]
;
- %x = load atomic i32, i32* null seq_cst, align 4
+ %x = load atomic i32, ptr null seq_cst, align 4
ret i32 %x
}
define i32 @test11_no_null_opt() #0 {
; CHECK-LABEL: @test11_no_null_opt(
-; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* null seq_cst, align 4294967296
+; CHECK-NEXT: [[X:%.*]] = load atomic i32, ptr null seq_cst, align 4294967296
; CHECK-NEXT: ret i32 [[X]]
;
- %x = load atomic i32, i32* null seq_cst, align 4
+ %x = load atomic i32, ptr null seq_cst, align 4
ret i32 %x
}
; An unordered store to null is likewise unreachable; there's no
; ordering imposed.
define i32 @test12() {
; CHECK-LABEL: @test12(
-; CHECK-NEXT: store atomic i32 poison, i32* null unordered, align 4294967296
+; CHECK-NEXT: store atomic i32 poison, ptr null unordered, align 4294967296
; CHECK-NEXT: ret i32 0
;
- store atomic i32 0, i32* null unordered, align 4
+ store atomic i32 0, ptr null unordered, align 4
ret i32 0
}
define i32 @test12_no_null_opt() #0 {
; CHECK-LABEL: @test12_no_null_opt(
-; CHECK-NEXT: store atomic i32 0, i32* null unordered, align 4294967296
+; CHECK-NEXT: store atomic i32 0, ptr null unordered, align 4294967296
; CHECK-NEXT: ret i32 0
;
- store atomic i32 0, i32* null unordered, align 4
+ store atomic i32 0, ptr null unordered, align 4
ret i32 0
}
; FIXME: Could also fold
define i32 @test13() {
; CHECK-LABEL: @test13(
-; CHECK-NEXT: store atomic i32 0, i32* null monotonic, align 4294967296
+; CHECK-NEXT: store atomic i32 0, ptr null monotonic, align 4294967296
; CHECK-NEXT: ret i32 0
;
- store atomic i32 0, i32* null monotonic, align 4
+ store atomic i32 0, ptr null monotonic, align 4
ret i32 0
}
define i32 @test13_no_null_opt() #0 {
; CHECK-LABEL: @test13_no_null_opt(
-; CHECK-NEXT: store atomic i32 0, i32* null monotonic, align 4294967296
+; CHECK-NEXT: store atomic i32 0, ptr null monotonic, align 4294967296
; CHECK-NEXT: ret i32 0
;
- store atomic i32 0, i32* null monotonic, align 4
+ store atomic i32 0, ptr null monotonic, align 4
ret i32 0
}
; Would this be legal to fold? Probably?
define i32 @test14() {
; CHECK-LABEL: @test14(
-; CHECK-NEXT: store atomic i32 0, i32* null seq_cst, align 4294967296
+; CHECK-NEXT: store atomic i32 0, ptr null seq_cst, align 4294967296
; CHECK-NEXT: ret i32 0
;
- store atomic i32 0, i32* null seq_cst, align 4
+ store atomic i32 0, ptr null seq_cst, align 4
ret i32 0
}
define i32 @test14_no_null_opt() #0 {
; CHECK-LABEL: @test14_no_null_opt(
-; CHECK-NEXT: store atomic i32 0, i32* null seq_cst, align 4294967296
+; CHECK-NEXT: store atomic i32 0, ptr null seq_cst, align 4294967296
; CHECK-NEXT: ret i32 0
;
- store atomic i32 0, i32* null seq_cst, align 4
+ store atomic i32 0, ptr null seq_cst, align 4
ret i32 0
}
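; The attribute group referenced by the @*_no_null_opt functions is elided
; from this excerpt; assuming it follows the usual pattern for these tests,
; it would be the following, which is what keeps accesses to null live in
; those variants:
attributes #0 = { null_pointer_is_valid }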
define i32 @test15(i1 %cnd) {
; CHECK-LABEL: @test15(
-; CHECK-NEXT: [[A_VAL:%.*]] = load atomic i32, i32* @a unordered, align 4
-; CHECK-NEXT: [[B_VAL:%.*]] = load atomic i32, i32* @b unordered, align 4
+; CHECK-NEXT: [[A_VAL:%.*]] = load atomic i32, ptr @a unordered, align 4
+; CHECK-NEXT: [[B_VAL:%.*]] = load atomic i32, ptr @b unordered, align 4
; CHECK-NEXT: [[X:%.*]] = select i1 [[CND:%.*]], i32 [[A_VAL]], i32 [[B_VAL]]
; CHECK-NEXT: ret i32 [[X]]
;
- %addr = select i1 %cnd, i32* @a, i32* @b
- %x = load atomic i32, i32* %addr unordered, align 4
+ %addr = select i1 %cnd, ptr @a, ptr @b
+ %x = load atomic i32, ptr %addr unordered, align 4
ret i32 %x
}
; FIXME: This would be legal to transform
define i32 @test16(i1 %cnd) {
; CHECK-LABEL: @test16(
-; CHECK-NEXT: [[ADDR:%.*]] = select i1 [[CND:%.*]], i32* @a, i32* @b
-; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* [[ADDR]] monotonic, align 4
+; CHECK-NEXT: [[ADDR:%.*]] = select i1 [[CND:%.*]], ptr @a, ptr @b
+; CHECK-NEXT: [[X:%.*]] = load atomic i32, ptr [[ADDR]] monotonic, align 4
; CHECK-NEXT: ret i32 [[X]]
;
- %addr = select i1 %cnd, i32* @a, i32* @b
- %x = load atomic i32, i32* %addr monotonic, align 4
+ %addr = select i1 %cnd, ptr @a, ptr @b
+ %x = load atomic i32, ptr %addr monotonic, align 4
ret i32 %x
}
; FIXME: This would be legal to transform
define i32 @test17(i1 %cnd) {
; CHECK-LABEL: @test17(
-; CHECK-NEXT: [[ADDR:%.*]] = select i1 [[CND:%.*]], i32* @a, i32* @b
-; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* [[ADDR]] seq_cst, align 4
+; CHECK-NEXT: [[ADDR:%.*]] = select i1 [[CND:%.*]], ptr @a, ptr @b
+; CHECK-NEXT: [[X:%.*]] = load atomic i32, ptr [[ADDR]] seq_cst, align 4
; CHECK-NEXT: ret i32 [[X]]
;
- %addr = select i1 %cnd, i32* @a, i32* @b
- %x = load atomic i32, i32* %addr seq_cst, align 4
+ %addr = select i1 %cnd, ptr @a, ptr @b
+ %x = load atomic i32, ptr %addr seq_cst, align 4
ret i32 %x
}
; CHECK-NEXT: br label [[MERGE]]
; CHECK: merge:
; CHECK-NEXT: [[STOREMERGE:%.*]] = phi i32 [ 2, [[BLOCK2]] ], [ 1, [[BLOCK1]] ]
-; CHECK-NEXT: store atomic i32 [[STOREMERGE]], i32* @a unordered, align 4
+; CHECK-NEXT: store atomic i32 [[STOREMERGE]], ptr @a unordered, align 4
; CHECK-NEXT: ret i32 0
;
br i1 %cnd, label %block1, label %block2
block1:
- store atomic i32 1, i32* @a unordered, align 4
+ store atomic i32 1, ptr @a unordered, align 4
br label %merge
block2:
- store atomic i32 2, i32* @a unordered, align 4
+ store atomic i32 2, ptr @a unordered, align 4
br label %merge
merge:
; CHECK-LABEL: @test23(
; CHECK-NEXT: br i1 [[CND:%.*]], label [[BLOCK1:%.*]], label [[BLOCK2:%.*]]
; CHECK: block1:
-; CHECK-NEXT: store atomic i32 1, i32* @a monotonic, align 4
+; CHECK-NEXT: store atomic i32 1, ptr @a monotonic, align 4
; CHECK-NEXT: br label [[MERGE:%.*]]
; CHECK: block2:
-; CHECK-NEXT: store atomic i32 2, i32* @a monotonic, align 4
+; CHECK-NEXT: store atomic i32 2, ptr @a monotonic, align 4
; CHECK-NEXT: br label [[MERGE]]
; CHECK: merge:
; CHECK-NEXT: ret i32 0
br i1 %cnd, label %block1, label %block2
block1:
- store atomic i32 1, i32* @a monotonic, align 4
+ store atomic i32 1, ptr @a monotonic, align 4
br label %merge
block2:
- store atomic i32 2, i32* @a monotonic, align 4
+ store atomic i32 2, ptr @a monotonic, align 4
br label %merge
merge:
declare void @clobber()
-define i32 @test18(float* %p) {
+define i32 @test18(ptr %p) {
; CHECK-LABEL: @test18(
-; CHECK-NEXT: [[X:%.*]] = load atomic float, float* [[P:%.*]] unordered, align 4
+; CHECK-NEXT: [[X:%.*]] = load atomic float, ptr [[P:%.*]] unordered, align 4
; CHECK-NEXT: call void @clobber()
-; CHECK-NEXT: store atomic float [[X]], float* [[P]] unordered, align 4
+; CHECK-NEXT: store atomic float [[X]], ptr [[P]] unordered, align 4
; CHECK-NEXT: ret i32 0
;
- %x = load atomic float, float* %p unordered, align 4
+ %x = load atomic float, ptr %p unordered, align 4
call void @clobber() ;; keep the load around
- store atomic float %x, float* %p unordered, align 4
+ store atomic float %x, ptr %p unordered, align 4
ret i32 0
}
; TODO: probably also legal in this case
-define i32 @test19(float* %p) {
+define i32 @test19(ptr %p) {
; CHECK-LABEL: @test19(
-; CHECK-NEXT: [[X:%.*]] = load atomic float, float* [[P:%.*]] seq_cst, align 4
+; CHECK-NEXT: [[X:%.*]] = load atomic float, ptr [[P:%.*]] seq_cst, align 4
; CHECK-NEXT: call void @clobber()
-; CHECK-NEXT: store atomic float [[X]], float* [[P]] seq_cst, align 4
+; CHECK-NEXT: store atomic float [[X]], ptr [[P]] seq_cst, align 4
; CHECK-NEXT: ret i32 0
;
- %x = load atomic float, float* %p seq_cst, align 4
+ %x = load atomic float, ptr %p seq_cst, align 4
call void @clobber() ;; keep the load around
- store atomic float %x, float* %p seq_cst, align 4
+ store atomic float %x, ptr %p seq_cst, align 4
ret i32 0
}
-define i32 @test20(i32** %p, i8* %v) {
+define i32 @test20(ptr %p, ptr %v) {
; CHECK-LABEL: @test20(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32** [[P:%.*]] to i8**
-; CHECK-NEXT: store atomic i8* [[V:%.*]], i8** [[TMP1]] unordered, align 4
+; CHECK-NEXT: store atomic ptr [[V:%.*]], ptr [[P:%.*]] unordered, align 4
; CHECK-NEXT: ret i32 0
;
- %cast = bitcast i8* %v to i32*
- store atomic i32* %cast, i32** %p unordered, align 4
+ store atomic ptr %v, ptr %p unordered, align 4
ret i32 0
}
-define i32 @test21(i32** %p, i8* %v) {
+define i32 @test21(ptr %p, ptr %v) {
; CHECK-LABEL: @test21(
-; CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[V:%.*]] to i32*
-; CHECK-NEXT: store atomic i32* [[CAST]], i32** [[P:%.*]] monotonic, align 4
+; CHECK-NEXT: store atomic ptr [[V:%.*]], ptr [[P:%.*]] monotonic, align 4
; CHECK-NEXT: ret i32 0
;
- %cast = bitcast i8* %v to i32*
- store atomic i32* %cast, i32** %p monotonic, align 4
+ store atomic ptr %v, ptr %p monotonic, align 4
ret i32 0
}
-define void @pr27490a(i8** %p1, i8** %p2) {
+define void @pr27490a(ptr %p1, ptr %p2) {
; CHECK-LABEL: @pr27490a(
-; CHECK-NEXT: [[L:%.*]] = load i8*, i8** [[P1:%.*]], align 8
-; CHECK-NEXT: store volatile i8* [[L]], i8** [[P2:%.*]], align 8
+; CHECK-NEXT: [[L:%.*]] = load ptr, ptr [[P1:%.*]], align 8
+; CHECK-NEXT: store volatile ptr [[L]], ptr [[P2:%.*]], align 8
; CHECK-NEXT: ret void
;
- %l = load i8*, i8** %p1
- store volatile i8* %l, i8** %p2
+ %l = load ptr, ptr %p1
+ store volatile ptr %l, ptr %p2
ret void
}
-define void @pr27490b(i8** %p1, i8** %p2) {
+define void @pr27490b(ptr %p1, ptr %p2) {
; CHECK-LABEL: @pr27490b(
-; CHECK-NEXT: [[L:%.*]] = load i8*, i8** [[P1:%.*]], align 8
-; CHECK-NEXT: store atomic i8* [[L]], i8** [[P2:%.*]] seq_cst, align 8
+; CHECK-NEXT: [[L:%.*]] = load ptr, ptr [[P1:%.*]], align 8
+; CHECK-NEXT: store atomic ptr [[L]], ptr [[P2:%.*]] seq_cst, align 8
; CHECK-NEXT: ret void
;
- %l = load i8*, i8** %p1
- store atomic i8* %l, i8** %p2 seq_cst, align 8
+ %l = load ptr, ptr %p1
+ store atomic ptr %l, ptr %p2 seq_cst, align 8
ret void
}
;; At the moment, we can't form atomic vectors by folding since these are
;; not representable in the IR. This was PR29121. The right long-term
;; solution is to extend the IR to handle this case.
-define <2 x float> @no_atomic_vector_load(i64* %p) {
+define <2 x float> @no_atomic_vector_load(ptr %p) {
; CHECK-LABEL: @no_atomic_vector_load(
-; CHECK-NEXT: [[LOAD:%.*]] = load atomic i64, i64* [[P:%.*]] unordered, align 8
+; CHECK-NEXT: [[LOAD:%.*]] = load atomic i64, ptr [[P:%.*]] unordered, align 8
; CHECK-NEXT: [[DOTCAST:%.*]] = bitcast i64 [[LOAD]] to <2 x float>
; CHECK-NEXT: ret <2 x float> [[DOTCAST]]
;
- %load = load atomic i64, i64* %p unordered, align 8
+ %load = load atomic i64, ptr %p unordered, align 8
%.cast = bitcast i64 %load to <2 x float>
ret <2 x float> %.cast
}
-define void @no_atomic_vector_store(<2 x float> %p, i8* %p2) {
+define void @no_atomic_vector_store(<2 x float> %p, ptr %p2) {
; CHECK-LABEL: @no_atomic_vector_store(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[P:%.*]] to i64
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[P2:%.*]] to i64*
-; CHECK-NEXT: store atomic i64 [[TMP1]], i64* [[TMP2]] unordered, align 8
+; CHECK-NEXT: store atomic i64 [[TMP1]], ptr [[P2:%.*]] unordered, align 8
; CHECK-NEXT: ret void
;
%1 = bitcast <2 x float> %p to i64
- %2 = bitcast i8* %p2 to i64*
- store atomic i64 %1, i64* %2 unordered, align 8
+ store atomic i64 %1, ptr %p2 unordered, align 8
ret void
}
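; For context (hypothetical IR, rejected by today's verifier): the
; long-term fix referred to above would let these fold straight to an
; atomic vector access, e.g.:
;   %load = load atomic <2 x float>, ptr %p unordered, align 8
; Until then, the folds keep the access as an atomic i64 and bitcast the
; value, as checked above.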
; CHECK-LABEL: @atomic_load_from_constant_global(
; CHECK-NEXT: ret i32 42
;
- %v = load atomic i32, i32* @c seq_cst, align 4
+ %v = load atomic i32, ptr @c seq_cst, align 4
ret i32 %v
}
; CHECK-LABEL: @atomic_load_from_constant_global_bitcast(
; CHECK-NEXT: ret i8 42
;
- %v = load atomic i8, i8* bitcast (i32* @c to i8*) seq_cst, align 1
+ %v = load atomic i8, ptr @c seq_cst, align 1
ret i8 %v
}
define void @atomic_load_from_non_constant_global() {
; CHECK-LABEL: @atomic_load_from_non_constant_global(
-; CHECK-NEXT: [[TMP1:%.*]] = load atomic i32, i32* @g seq_cst, align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load atomic i32, ptr @g seq_cst, align 4
; CHECK-NEXT: ret void
;
- load atomic i32, i32* @g seq_cst, align 4
+ load atomic i32, ptr @g seq_cst, align 4
ret void
}
define void @volatile_load_from_constant_global() {
; CHECK-LABEL: @volatile_load_from_constant_global(
-; CHECK-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @c, align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load volatile i32, ptr @c, align 4
; CHECK-NEXT: ret void
;
- load volatile i32, i32* @c, align 4
+ load volatile i32, ptr @c, align 4
ret void
}
; Check that we can replace `atomicrmw <op> LHS, 0` with `load atomic LHS`.
; This is possible when:
; - <op> LHS, 0 == LHS
; - the ordering of atomicrmw is compatible with a load (i.e., no release semantic)
; CHECK-LABEL: atomic_add_zero
-; CHECK-NEXT: %res = load atomic i32, i32* %addr monotonic, align 4
+; CHECK-NEXT: %res = load atomic i32, ptr %addr monotonic, align 4
; CHECK-NEXT: ret i32 %res
-define i32 @atomic_add_zero(i32* %addr) {
- %res = atomicrmw add i32* %addr, i32 0 monotonic
+define i32 @atomic_add_zero(ptr %addr) {
+ %res = atomicrmw add ptr %addr, i32 0 monotonic
ret i32 %res
}
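; Hedged before/after sketch (hypothetical name @idempotent_sketch): an
; idempotent atomicrmw is observationally a plain atomic load with the
; same ordering, which is the rewrite the surrounding tests pin down.
define i32 @idempotent_sketch(ptr %p) {
  ; before: %res = atomicrmw add ptr %p, i32 0 monotonic
  %res = load atomic i32, ptr %p monotonic, align 4
  ret i32 %res
}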
; CHECK-LABEL: atomic_or_zero
-; CHECK-NEXT: %res = load atomic i32, i32* %addr monotonic, align 4
+; CHECK-NEXT: %res = load atomic i32, ptr %addr monotonic, align 4
; CHECK-NEXT: ret i32 %res
-define i32 @atomic_or_zero(i32* %addr) {
- %res = atomicrmw or i32* %addr, i32 0 monotonic
+define i32 @atomic_or_zero(ptr %addr) {
+ %res = atomicrmw or ptr %addr, i32 0 monotonic
ret i32 %res
}
; CHECK-LABEL: atomic_sub_zero
-; CHECK-NEXT: %res = load atomic i32, i32* %addr monotonic, align 4
+; CHECK-NEXT: %res = load atomic i32, ptr %addr monotonic, align 4
; CHECK-NEXT: ret i32 %res
-define i32 @atomic_sub_zero(i32* %addr) {
- %res = atomicrmw sub i32* %addr, i32 0 monotonic
+define i32 @atomic_sub_zero(ptr %addr) {
+ %res = atomicrmw sub ptr %addr, i32 0 monotonic
ret i32 %res
}
; CHECK-LABEL: atomic_and_allones
-; CHECK-NEXT: %res = load atomic i32, i32* %addr monotonic, align 4
+; CHECK-NEXT: %res = load atomic i32, ptr %addr monotonic, align 4
; CHECK-NEXT: ret i32 %res
-define i32 @atomic_and_allones(i32* %addr) {
- %res = atomicrmw and i32* %addr, i32 -1 monotonic
+define i32 @atomic_and_allones(ptr %addr) {
+ %res = atomicrmw and ptr %addr, i32 -1 monotonic
ret i32 %res
}
; CHECK-LABEL: atomic_umin_uint_max
-; CHECK-NEXT: %res = load atomic i32, i32* %addr monotonic, align 4
+; CHECK-NEXT: %res = load atomic i32, ptr %addr monotonic, align 4
; CHECK-NEXT: ret i32 %res
-define i32 @atomic_umin_uint_max(i32* %addr) {
- %res = atomicrmw umin i32* %addr, i32 -1 monotonic
+define i32 @atomic_umin_uint_max(ptr %addr) {
+ %res = atomicrmw umin ptr %addr, i32 -1 monotonic
ret i32 %res
}
; CHECK-LABEL: atomic_umax_zero
-; CHECK-NEXT: %res = load atomic i32, i32* %addr monotonic, align 4
+; CHECK-NEXT: %res = load atomic i32, ptr %addr monotonic, align 4
; CHECK-NEXT: ret i32 %res
-define i32 @atomic_umax_zero(i32* %addr) {
- %res = atomicrmw umax i32* %addr, i32 0 monotonic
+define i32 @atomic_umax_zero(ptr %addr) {
+ %res = atomicrmw umax ptr %addr, i32 0 monotonic
ret i32 %res
}
; CHECK-LABEL: atomic_min_smax_char
-; CHECK-NEXT: %res = load atomic i8, i8* %addr monotonic, align 1
+; CHECK-NEXT: %res = load atomic i8, ptr %addr monotonic, align 1
; CHECK-NEXT: ret i8 %res
-define i8 @atomic_min_smax_char(i8* %addr) {
- %res = atomicrmw min i8* %addr, i8 127 monotonic
+define i8 @atomic_min_smax_char(ptr %addr) {
+ %res = atomicrmw min ptr %addr, i8 127 monotonic
ret i8 %res
}
; CHECK-LABEL: atomic_max_smin_char
-; CHECK-NEXT: %res = load atomic i8, i8* %addr monotonic, align 1
+; CHECK-NEXT: %res = load atomic i8, ptr %addr monotonic, align 1
; CHECK-NEXT: ret i8 %res
-define i8 @atomic_max_smin_char(i8* %addr) {
- %res = atomicrmw max i8* %addr, i8 -128 monotonic
+define i8 @atomic_max_smin_char(ptr %addr) {
+ %res = atomicrmw max ptr %addr, i8 -128 monotonic
ret i8 %res
}
; CHECK-LABEL: atomic_fsub
-; CHECK-NEXT: %res = load atomic float, float* %addr monotonic, align 4
+; CHECK-NEXT: %res = load atomic float, ptr %addr monotonic, align 4
; CHECK-NEXT: ret float %res
-define float @atomic_fsub_zero(float* %addr) {
- %res = atomicrmw fsub float* %addr, float 0.0 monotonic
+define float @atomic_fsub_zero(ptr %addr) {
+ %res = atomicrmw fsub ptr %addr, float 0.0 monotonic
ret float %res
}
; CHECK-LABEL: atomic_fadd
-; CHECK-NEXT: %res = load atomic float, float* %addr monotonic, align 4
+; CHECK-NEXT: %res = load atomic float, ptr %addr monotonic, align 4
; CHECK-NEXT: ret float %res
-define float @atomic_fadd_zero(float* %addr) {
- %res = atomicrmw fadd float* %addr, float -0.0 monotonic
+define float @atomic_fadd_zero(ptr %addr) {
+ %res = atomicrmw fadd ptr %addr, float -0.0 monotonic
ret float %res
}
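; Why -0.0 is the identity here (a worked note, not from the file): for
; any float x, fadd x, -0.0 == x, including x == -0.0; by contrast
; fadd -0.0, +0.0 == +0.0 flips the sign of zero. So fsub x, 0.0 and
; fadd x, -0.0 are the idempotent forms, and only those can become a load.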
; CHECK-LABEL: atomic_fsub_canon
-; CHECK-NEXT: %res = atomicrmw fadd float* %addr, float -0.000000e+00 release
+; CHECK-NEXT: %res = atomicrmw fadd ptr %addr, float -0.000000e+00 release
; CHECK-NEXT: ret float %res
-define float @atomic_fsub_canon(float* %addr) {
- %res = atomicrmw fsub float* %addr, float 0.0 release
+define float @atomic_fsub_canon(ptr %addr) {
+ %res = atomicrmw fsub ptr %addr, float 0.0 release
ret float %res
}
; CHECK-LABEL: atomic_fadd_canon
-; CHECK-NEXT: %res = atomicrmw fadd float* %addr, float -0.000000e+00 release
+; CHECK-NEXT: %res = atomicrmw fadd ptr %addr, float -0.000000e+00 release
; CHECK-NEXT: ret float %res
-define float @atomic_fadd_canon(float* %addr) {
- %res = atomicrmw fadd float* %addr, float -0.0 release
+define float @atomic_fadd_canon(ptr %addr) {
+ %res = atomicrmw fadd ptr %addr, float -0.0 release
ret float %res
}
; Can't replace a volatile atomicrmw with a load; this would eliminate a volatile store.
; CHECK-LABEL: atomic_sub_zero_volatile
-; CHECK-NEXT: %res = atomicrmw volatile sub i64* %addr, i64 0 acquire
+; CHECK-NEXT: %res = atomicrmw volatile sub ptr %addr, i64 0 acquire
; CHECK-NEXT: ret i64 %res
-define i64 @atomic_sub_zero_volatile(i64* %addr) {
- %res = atomicrmw volatile sub i64* %addr, i64 0 acquire
+define i64 @atomic_sub_zero_volatile(ptr %addr) {
+ %res = atomicrmw volatile sub ptr %addr, i64 0 acquire
ret i64 %res
}
; Check that the transformation properly preserves the syncscope.
; CHECK-LABEL: atomic_syncscope
-; CHECK-NEXT: %res = load atomic i16, i16* %addr syncscope("some_syncscope") acquire, align 2
+; CHECK-NEXT: %res = load atomic i16, ptr %addr syncscope("some_syncscope") acquire, align 2
; CHECK-NEXT: ret i16 %res
-define i16 @atomic_syncscope(i16* %addr) {
- %res = atomicrmw or i16* %addr, i16 0 syncscope("some_syncscope") acquire
+define i16 @atomic_syncscope(ptr %addr) {
+ %res = atomicrmw or ptr %addr, i16 0 syncscope("some_syncscope") acquire
ret i16 %res
}
; By eliminating the store part of the atomicrmw, we would get rid of the
; release semantic, which is incorrect. We can canonicalize the operation.
; CHECK-LABEL: atomic_seq_cst
-; CHECK-NEXT: %res = atomicrmw or i16* %addr, i16 0 seq_cst
+; CHECK-NEXT: %res = atomicrmw or ptr %addr, i16 0 seq_cst
; CHECK-NEXT: ret i16 %res
-define i16 @atomic_seq_cst(i16* %addr) {
- %res = atomicrmw add i16* %addr, i16 0 seq_cst
+define i16 @atomic_seq_cst(ptr %addr) {
+ %res = atomicrmw add ptr %addr, i16 0 seq_cst
ret i16 %res
}
; Check that the transformation does not apply when the value is changed by
; the atomic operation (non-zero constant).
; CHECK-LABEL: atomic_add_non_zero
-; CHECK-NEXT: %res = atomicrmw add i16* %addr, i16 2 monotonic
+; CHECK-NEXT: %res = atomicrmw add ptr %addr, i16 2 monotonic
; CHECK-NEXT: ret i16 %res
-define i16 @atomic_add_non_zero(i16* %addr) {
- %res = atomicrmw add i16* %addr, i16 2 monotonic
+define i16 @atomic_add_non_zero(ptr %addr) {
+ %res = atomicrmw add ptr %addr, i16 2 monotonic
ret i16 %res
}
; CHECK-LABEL: atomic_xor_zero
-; CHECK-NEXT: %res = load atomic i16, i16* %addr monotonic, align 2
+; CHECK-NEXT: %res = load atomic i16, ptr %addr monotonic, align 2
; CHECK-NEXT: ret i16 %res
-define i16 @atomic_xor_zero(i16* %addr) {
- %res = atomicrmw xor i16* %addr, i16 0 monotonic
+define i16 @atomic_xor_zero(ptr %addr) {
+ %res = atomicrmw xor ptr %addr, i16 0 monotonic
ret i16 %res
}
; Check that the transformation does not apply when the ordering is
; incompatible with a load (release). Do canonicalize.
; CHECK-LABEL: atomic_release
-; CHECK-NEXT: %res = atomicrmw or i16* %addr, i16 0 release
+; CHECK-NEXT: %res = atomicrmw or ptr %addr, i16 0 release
; CHECK-NEXT: ret i16 %res
-define i16 @atomic_release(i16* %addr) {
- %res = atomicrmw sub i16* %addr, i16 0 release
+define i16 @atomic_release(ptr %addr) {
+ %res = atomicrmw sub ptr %addr, i16 0 release
ret i16 %res
}
; Check that the transformation does not apply when the ordering is
; incompatible with a load (acquire, release). Do canonicalize.
; CHECK-LABEL: atomic_acq_rel
-; CHECK-NEXT: %res = atomicrmw or i16* %addr, i16 0 acq_rel
+; CHECK-NEXT: %res = atomicrmw or ptr %addr, i16 0 acq_rel
; CHECK-NEXT: ret i16 %res
-define i16 @atomic_acq_rel(i16* %addr) {
- %res = atomicrmw xor i16* %addr, i16 0 acq_rel
+define i16 @atomic_acq_rel(ptr %addr) {
+ %res = atomicrmw xor ptr %addr, i16 0 acq_rel
ret i16 %res
}
; CHECK-LABEL: sat_or_allones
-; CHECK-NEXT: %res = atomicrmw xchg i32* %addr, i32 -1 monotonic
+; CHECK-NEXT: %res = atomicrmw xchg ptr %addr, i32 -1 monotonic
; CHECK-NEXT: ret i32 %res
-define i32 @sat_or_allones(i32* %addr) {
- %res = atomicrmw or i32* %addr, i32 -1 monotonic
+define i32 @sat_or_allones(ptr %addr) {
+ %res = atomicrmw or ptr %addr, i32 -1 monotonic
ret i32 %res
}
; CHECK-LABEL: sat_and_zero
-; CHECK-NEXT: %res = atomicrmw xchg i32* %addr, i32 0 monotonic
+; CHECK-NEXT: %res = atomicrmw xchg ptr %addr, i32 0 monotonic
; CHECK-NEXT: ret i32 %res
-define i32 @sat_and_zero(i32* %addr) {
- %res = atomicrmw and i32* %addr, i32 0 monotonic
+define i32 @sat_and_zero(ptr %addr) {
+ %res = atomicrmw and ptr %addr, i32 0 monotonic
ret i32 %res
}
; CHECK-LABEL: sat_umin_uint_min
-; CHECK-NEXT: %res = atomicrmw xchg i32* %addr, i32 0 monotonic
+; CHECK-NEXT: %res = atomicrmw xchg ptr %addr, i32 0 monotonic
; CHECK-NEXT: ret i32 %res
-define i32 @sat_umin_uint_min(i32* %addr) {
- %res = atomicrmw umin i32* %addr, i32 0 monotonic
+define i32 @sat_umin_uint_min(ptr %addr) {
+ %res = atomicrmw umin ptr %addr, i32 0 monotonic
ret i32 %res
}
; CHECK-LABEL: sat_umax_uint_max
-; CHECK-NEXT: %res = atomicrmw xchg i32* %addr, i32 -1 monotonic
+; CHECK-NEXT: %res = atomicrmw xchg ptr %addr, i32 -1 monotonic
; CHECK-NEXT: ret i32 %res
-define i32 @sat_umax_uint_max(i32* %addr) {
- %res = atomicrmw umax i32* %addr, i32 -1 monotonic
+define i32 @sat_umax_uint_max(ptr %addr) {
+ %res = atomicrmw umax ptr %addr, i32 -1 monotonic
ret i32 %res
}
; CHECK-LABEL: sat_min_smin_char
-; CHECK-NEXT: %res = atomicrmw xchg i8* %addr, i8 -128 monotonic
+; CHECK-NEXT: %res = atomicrmw xchg ptr %addr, i8 -128 monotonic
; CHECK-NEXT: ret i8 %res
-define i8 @sat_min_smin_char(i8* %addr) {
- %res = atomicrmw min i8* %addr, i8 -128 monotonic
+define i8 @sat_min_smin_char(ptr %addr) {
+ %res = atomicrmw min ptr %addr, i8 -128 monotonic
ret i8 %res
}
; CHECK-LABEL: sat_max_smax_char
-; CHECK-NEXT: %res = atomicrmw xchg i8* %addr, i8 127 monotonic
+; CHECK-NEXT: %res = atomicrmw xchg ptr %addr, i8 127 monotonic
; CHECK-NEXT: ret i8 %res
-define i8 @sat_max_smax_char(i8* %addr) {
- %res = atomicrmw max i8* %addr, i8 127 monotonic
+define i8 @sat_max_smax_char(ptr %addr) {
+ %res = atomicrmw max ptr %addr, i8 127 monotonic
ret i8 %res
}
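; Hedged sketch (hypothetical name @sat_sketch): when the constant
; saturates the operation, the read-modify-write always stores that
; constant, so it is equivalent to an xchg of the constant at the same
; ordering.
define i32 @sat_sketch(ptr %p) {
  ; before: %res = atomicrmw or ptr %p, i32 -1 monotonic
  %res = atomicrmw xchg ptr %p, i32 -1 monotonic
  ret i32 %res
}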
; CHECK-LABEL: sat_fadd_nan
-; CHECK-NEXT: %res = atomicrmw xchg double* %addr, double 0x7FF00000FFFFFFFF release
+; CHECK-NEXT: %res = atomicrmw xchg ptr %addr, double 0x7FF00000FFFFFFFF release
; CHECK-NEXT: ret double %res
-define double @sat_fadd_nan(double* %addr) {
- %res = atomicrmw fadd double* %addr, double 0x7FF00000FFFFFFFF release
+define double @sat_fadd_nan(ptr %addr) {
+ %res = atomicrmw fadd ptr %addr, double 0x7FF00000FFFFFFFF release
ret double %res
}
; CHECK-LABEL: sat_fsub_nan
-; CHECK-NEXT: %res = atomicrmw xchg double* %addr, double 0x7FF00000FFFFFFFF release
+; CHECK-NEXT: %res = atomicrmw xchg ptr %addr, double 0x7FF00000FFFFFFFF release
; CHECK-NEXT: ret double %res
-define double @sat_fsub_nan(double* %addr) {
- %res = atomicrmw fsub double* %addr, double 0x7FF00000FFFFFFFF release
+define double @sat_fsub_nan(ptr %addr) {
+ %res = atomicrmw fsub ptr %addr, double 0x7FF00000FFFFFFFF release
ret double %res
}
; CHECK-LABEL: sat_fsub_nan_unused
-; CHECK-NEXT: store atomic double 0x7FF00000FFFFFFFF, double* %addr monotonic, align 8
+; CHECK-NEXT: store atomic double 0x7FF00000FFFFFFFF, ptr %addr monotonic, align 8
; CHECK-NEXT: ret void
-define void @sat_fsub_nan_unused(double* %addr) {
- atomicrmw fsub double* %addr, double 0x7FF00000FFFFFFFF monotonic
+define void @sat_fsub_nan_unused(ptr %addr) {
+ atomicrmw fsub ptr %addr, double 0x7FF00000FFFFFFFF monotonic
ret void
}
; CHECK-LABEL: xchg_unused_monotonic
-; CHECK-NEXT: store atomic i32 0, i32* %addr monotonic, align 4
+; CHECK-NEXT: store atomic i32 0, ptr %addr monotonic, align 4
; CHECK-NEXT: ret void
-define void @xchg_unused_monotonic(i32* %addr) {
- atomicrmw xchg i32* %addr, i32 0 monotonic
+define void @xchg_unused_monotonic(ptr %addr) {
+ atomicrmw xchg ptr %addr, i32 0 monotonic
ret void
}
; CHECK-LABEL: xchg_unused_release
-; CHECK-NEXT: store atomic i32 -1, i32* %addr release, align 4
+; CHECK-NEXT: store atomic i32 -1, ptr %addr release, align 4
; CHECK-NEXT: ret void
-define void @xchg_unused_release(i32* %addr) {
- atomicrmw xchg i32* %addr, i32 -1 release
+define void @xchg_unused_release(ptr %addr) {
+ atomicrmw xchg ptr %addr, i32 -1 release
ret void
}
; CHECK-LABEL: xchg_unused_seq_cst
-; CHECK-NEXT: atomicrmw xchg i32* %addr, i32 0 seq_cst
+; CHECK-NEXT: atomicrmw xchg ptr %addr, i32 0 seq_cst
; CHECK-NEXT: ret void
-define void @xchg_unused_seq_cst(i32* %addr) {
- atomicrmw xchg i32* %addr, i32 0 seq_cst
+define void @xchg_unused_seq_cst(ptr %addr) {
+ atomicrmw xchg ptr %addr, i32 0 seq_cst
ret void
}
; CHECK-LABEL: xchg_unused_volatile
-; CHECK-NEXT: atomicrmw volatile xchg i32* %addr, i32 0 monotonic
+; CHECK-NEXT: atomicrmw volatile xchg ptr %addr, i32 0 monotonic
; CHECK-NEXT: ret void
-define void @xchg_unused_volatile(i32* %addr) {
- atomicrmw volatile xchg i32* %addr, i32 0 monotonic
+define void @xchg_unused_volatile(ptr %addr) {
+ atomicrmw volatile xchg ptr %addr, i32 0 monotonic
ret void
}
; CHECK-LABEL: sat_or_allones_unused
-; CHECK-NEXT: store atomic i32 -1, i32* %addr monotonic, align 4
+; CHECK-NEXT: store atomic i32 -1, ptr %addr monotonic, align 4
; CHECK-NEXT: ret void
-define void @sat_or_allones_unused(i32* %addr) {
- atomicrmw or i32* %addr, i32 -1 monotonic
+define void @sat_or_allones_unused(ptr %addr) {
+ atomicrmw or ptr %addr, i32 -1 monotonic
ret void
}
; CHECK-LABEL: undef_operand_unused
-; CHECK-NEXT: atomicrmw or i32* %addr, i32 undef monotonic
+; CHECK-NEXT: atomicrmw or ptr %addr, i32 undef monotonic
; CHECK-NEXT: ret void
-define void @undef_operand_unused(i32* %addr) {
- atomicrmw or i32* %addr, i32 undef monotonic
+define void @undef_operand_unused(ptr %addr) {
+ atomicrmw or ptr %addr, i32 undef monotonic
ret void
}
; CHECK-LABEL: undef_operand_used
-; CHECK-NEXT: %res = atomicrmw or i32* %addr, i32 undef monotonic
+; CHECK-NEXT: %res = atomicrmw or ptr %addr, i32 undef monotonic
; CHECK-NEXT: ret i32 %res
-define i32 @undef_operand_used(i32* %addr) {
- %res = atomicrmw or i32* %addr, i32 undef monotonic
+define i32 @undef_operand_used(ptr %addr) {
+ %res = atomicrmw or ptr %addr, i32 undef monotonic
ret i32 %res
}
; CHECK-LABEL: sat_fmax_inf
-; CHECK-NEXT: %res = atomicrmw xchg double* %addr, double 0x7FF0000000000000 monotonic
+; CHECK-NEXT: %res = atomicrmw xchg ptr %addr, double 0x7FF0000000000000 monotonic
; CHECK-NEXT: ret double %res
-define double @sat_fmax_inf(double* %addr) {
- %res = atomicrmw fmax double* %addr, double 0x7FF0000000000000 monotonic
+define double @sat_fmax_inf(ptr %addr) {
+ %res = atomicrmw fmax ptr %addr, double 0x7FF0000000000000 monotonic
ret double %res
}
; CHECK-LABEL: no_sat_fmax_inf
-; CHECK-NEXT: %res = atomicrmw fmax double* %addr, double 1.000000e-01 monotonic
+; CHECK-NEXT: %res = atomicrmw fmax ptr %addr, double 1.000000e-01 monotonic
; CHECK-NEXT: ret double %res
-define double @no_sat_fmax_inf(double* %addr) {
- %res = atomicrmw fmax double* %addr, double 1.000000e-01 monotonic
+define double @no_sat_fmax_inf(ptr %addr) {
+ %res = atomicrmw fmax ptr %addr, double 1.000000e-01 monotonic
ret double %res
}
; CHECK-LABEL: sat_fmin_inf
-; CHECK-NEXT: %res = atomicrmw xchg double* %addr, double 0xFFF0000000000000 monotonic
+; CHECK-NEXT: %res = atomicrmw xchg ptr %addr, double 0xFFF0000000000000 monotonic
; CHECK-NEXT: ret double %res
-define double @sat_fmin_inf(double* %addr) {
- %res = atomicrmw fmin double* %addr, double 0xFFF0000000000000 monotonic
+define double @sat_fmin_inf(ptr %addr) {
+ %res = atomicrmw fmin ptr %addr, double 0xFFF0000000000000 monotonic
ret double %res
}
; CHECK-LABEL: no_sat_fmin_inf
-; CHECK-NEXT: %res = atomicrmw fmin double* %addr, double 1.000000e-01 monotonic
+; CHECK-NEXT: %res = atomicrmw fmin ptr %addr, double 1.000000e-01 monotonic
; CHECK-NEXT: ret double %res
-define double @no_sat_fmin_inf(double* %addr) {
- %res = atomicrmw fmin double* %addr, double 1.000000e-01 monotonic
+define double @no_sat_fmin_inf(ptr %addr) {
+ %res = atomicrmw fmin ptr %addr, double 1.000000e-01 monotonic
ret double %res
}
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-apple-darwin10.0"
-declare noalias i8* @malloc(i64) nounwind allockind("alloc,uninitialized") "alloc-family"="malloc"
-declare void @free(i8*) allockind("free") "alloc-family"="malloc"
+declare noalias ptr @malloc(i64) nounwind allockind("alloc,uninitialized") "alloc-family"="malloc"
+declare void @free(ptr) allockind("free") "alloc-family"="malloc"
; PR5130
define i1 @test1() {
- %A = call noalias i8* @malloc(i64 4) nounwind
- %B = icmp eq i8* %A, null
- store i8 0, i8* %A
+ %A = call noalias ptr @malloc(i64 4) nounwind
+ %B = icmp eq ptr %A, null
+ store i8 0, ptr %A
- call void @free(i8* %A)
+ call void @free(ptr %A)
ret i1 %B
; CHECK-LABEL: @test1(
}
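; Hedged sketch of the expected result (the CHECK lines for @test1 are
; elided above): a removable malloc is known non-null and the paired free
; goes with it, so the body is expected to fold to just:
define i1 @test1_expected_sketch() {
  ret i1 false
}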
; CHECK-LABEL: @test2(
-define noalias i8* @test2() nounwind {
+define noalias ptr @test2() nounwind {
entry:
; CHECK: @malloc
- %A = call noalias i8* @malloc(i64 4) nounwind
+ %A = call noalias ptr @malloc(i64 4) nounwind
; CHECK: icmp eq
- %tobool = icmp eq i8* %A, null
+ %tobool = icmp eq ptr %A, null
; CHECK: br i1
br i1 %tobool, label %return, label %if.end
if.end:
; CHECK: store
- store i8 7, i8* %A
+ store i8 7, ptr %A
br label %return
return:
; CHECK: phi
- %retval.0 = phi i8* [ %A, %if.end ], [ null, %entry ]
- ret i8* %retval.0
+ %retval.0 = phi ptr [ %A, %if.end ], [ null, %entry ]
+ ret ptr %retval.0
}
@hel = constant [4 x i8] c"hel\00"
@hello_u = constant [8 x i8] c"hello_u\00"
-declare i32 @bcmp(i8*, i8*, i32)
+declare i32 @bcmp(ptr, ptr, i32)
; Check bcmp(mem, mem, size) -> 0.
-define i32 @test_simplify1(i8* %mem, i32 %size) {
+define i32 @test_simplify1(ptr %mem, i32 %size) {
; CHECK-LABEL: @test_simplify1(
; CHECK-NEXT: ret i32 0
;
- %ret = call i32 @bcmp(i8* %mem, i8* %mem, i32 %size)
+ %ret = call i32 @bcmp(ptr %mem, ptr %mem, i32 %size)
ret i32 %ret
}
; Check bcmp(mem1, mem2, 0) -> 0.
-define i32 @test_simplify2(i8* %mem1, i8* %mem2) {
+define i32 @test_simplify2(ptr %mem1, ptr %mem2) {
; CHECK-LABEL: @test_simplify2(
; CHECK-NEXT: ret i32 0
;
- %ret = call i32 @bcmp(i8* %mem1, i8* %mem2, i32 0)
+ %ret = call i32 @bcmp(ptr %mem1, ptr %mem2, i32 0)
ret i32 %ret
}
; Check bcmp(mem1, mem2, 1) -> *(unsigned char*)mem1 - *(unsigned char*)mem2.
-define i32 @test_simplify3(i8* %mem1, i8* %mem2) {
+define i32 @test_simplify3(ptr %mem1, ptr %mem2) {
; CHECK-LABEL: @test_simplify3(
-; CHECK-NEXT: [[LHSC:%.*]] = load i8, i8* [[MEM1:%.*]], align 1
+; CHECK-NEXT: [[LHSC:%.*]] = load i8, ptr [[MEM1:%.*]], align 1
; CHECK-NEXT: [[LHSV:%.*]] = zext i8 [[LHSC]] to i32
-; CHECK-NEXT: [[RHSC:%.*]] = load i8, i8* [[MEM2:%.*]], align 1
+; CHECK-NEXT: [[RHSC:%.*]] = load i8, ptr [[MEM2:%.*]], align 1
; CHECK-NEXT: [[RHSV:%.*]] = zext i8 [[RHSC]] to i32
; CHECK-NEXT: [[CHARDIFF:%.*]] = sub nsw i32 [[LHSV]], [[RHSV]]
; CHECK-NEXT: ret i32 [[CHARDIFF]]
;
- %ret = call i32 @bcmp(i8* %mem1, i8* %mem2, i32 1)
+ %ret = call i32 @bcmp(ptr %mem1, ptr %mem2, i32 1)
ret i32 %ret
}
; CHECK-LABEL: @test_simplify4(
; CHECK-NEXT: ret i32 0
;
- %mem1 = getelementptr [4 x i8], [4 x i8]* @hel, i32 0, i32 0
- %mem2 = getelementptr [8 x i8], [8 x i8]* @hello_u, i32 0, i32 0
- %ret = call i32 @bcmp(i8* %mem1, i8* %mem2, i32 3)
+ %ret = call i32 @bcmp(ptr @hel, ptr @hello_u, i32 3)
ret i32 %ret
}
; CHECK-LABEL: @test_simplify5(
; CHECK-NEXT: ret i32 1
;
- %mem1 = getelementptr [4 x i8], [4 x i8]* @hel, i32 0, i32 0
- %mem2 = getelementptr [4 x i8], [4 x i8]* @foo, i32 0, i32 0
- %ret = call i32 @bcmp(i8* %mem1, i8* %mem2, i32 3)
+ %ret = call i32 @bcmp(ptr @hel, ptr @foo, i32 3)
ret i32 %ret
}
; CHECK-LABEL: @test_simplify6(
; CHECK-NEXT: ret i32 -1
;
- %mem1 = getelementptr [4 x i8], [4 x i8]* @foo, i32 0, i32 0
- %mem2 = getelementptr [4 x i8], [4 x i8]* @hel, i32 0, i32 0
- %ret = call i32 @bcmp(i8* %mem1, i8* %mem2, i32 3)
+ %ret = call i32 @bcmp(ptr @foo, ptr @hel, i32 3)
ret i32 %ret
}
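; @foo is defined in an elided part of this file; assuming it matches the
; neighbouring string constants, it would be a 4-byte constant such as:
@foo = constant [4 x i8] c"foo\00"
; which is consistent with @test_simplify5 returning 1 ('h' > 'f') and
; @test_simplify6 returning -1.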
;
%x.addr = alloca i64, align 8
%y.addr = alloca i64, align 8
- store i64 %x, i64* %x.addr, align 8
- store i64 %y, i64* %y.addr, align 8
- %xptr = bitcast i64* %x.addr to i8*
- %yptr = bitcast i64* %y.addr to i8*
- %call = call i32 @bcmp(i8* %xptr, i8* %yptr, i32 8)
+ store i64 %x, ptr %x.addr, align 8
+ store i64 %y, ptr %y.addr, align 8
+ %call = call i32 @bcmp(ptr %x.addr, ptr %y.addr, i32 8)
%cmp = icmp eq i32 %call, 0
ret i1 %cmp
}
;
%x.addr = alloca i32, align 4
%y.addr = alloca i32, align 4
- store i32 %x, i32* %x.addr, align 4
- store i32 %y, i32* %y.addr, align 4
- %xptr = bitcast i32* %x.addr to i8*
- %yptr = bitcast i32* %y.addr to i8*
- %call = call i32 @bcmp(i8* %xptr, i8* %yptr, i32 4)
+ store i32 %x, ptr %x.addr, align 4
+ store i32 %y, ptr %y.addr, align 4
+ %call = call i32 @bcmp(ptr %x.addr, ptr %y.addr, i32 4)
%cmp = icmp eq i32 %call, 0
ret i1 %cmp
}
;
%x.addr = alloca i16, align 2
%y.addr = alloca i16, align 2
- store i16 %x, i16* %x.addr, align 2
- store i16 %y, i16* %y.addr, align 2
- %xptr = bitcast i16* %x.addr to i8*
- %yptr = bitcast i16* %y.addr to i8*
- %call = call i32 @bcmp(i8* %xptr, i8* %yptr, i32 2)
+ store i16 %x, ptr %x.addr, align 2
+ store i16 %y, ptr %y.addr, align 2
+ %call = call i32 @bcmp(ptr %x.addr, ptr %y.addr, i32 2)
%cmp = icmp eq i32 %call, 0
ret i1 %cmp
}
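; Hedged sketch of the fold the three fixed-size cases above expect
; (hypothetical name): loading both locals back and bcmp'ing n bytes
; reduces to a direct integer equality compare of the stored values.
define i1 @bcmp_i64_expected_sketch(i64 %x, i64 %y) {
  %cmp = icmp eq i64 %x, %y
  ret i1 %cmp
}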
-define i1 @test_simplify10(i8* %mem1, i8* %mem2, i32 %size) {
+define i1 @test_simplify10(ptr %mem1, ptr %mem2, i32 %size) {
; CHECK-LABEL: @test_simplify10(
-; CHECK-NEXT: [[CALL:%.*]] = call i32 @bcmp(i8* [[MEM1:%.*]], i8* [[MEM2:%.*]], i32 [[SIZE:%.*]])
+; CHECK-NEXT: [[CALL:%.*]] = call i32 @bcmp(ptr [[MEM1:%.*]], ptr [[MEM2:%.*]], i32 [[SIZE:%.*]])
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; CHECK-NEXT: ret i1 [[CMP]]
;
- %call = call i32 @bcmp(i8* %mem1, i8* %mem2, i32 %size)
+ %call = call i32 @bcmp(ptr %mem1, ptr %mem2, i32 %size)
%cmp = icmp eq i32 %call, 0
ret i1 %cmp
}
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -data-layout=p:32:32 -S | FileCheck %s
-declare void @bcopy(i8* nocapture readonly, i8* nocapture, i32)
+declare void @bcopy(ptr nocapture readonly, ptr nocapture, i32)
-define void @bcopy_memmove(i8* nocapture readonly %a, i8* nocapture %b) {
+define void @bcopy_memmove(ptr nocapture readonly %a, ptr nocapture %b) {
; CHECK-LABEL: @bcopy_memmove(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[A:%.*]] to i64*
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[B:%.*]] to i64*
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
-; CHECK-NEXT: store i64 [[TMP3]], i64* [[TMP2]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[A:%.*]], align 1
+; CHECK-NEXT: store i64 [[TMP3]], ptr [[B:%.*]], align 1
; CHECK-NEXT: ret void
;
- tail call void @bcopy(i8* %a, i8* %b, i32 8)
+ tail call void @bcopy(ptr %a, ptr %b, i32 8)
ret void
}
-define void @bcopy_memmove2(i8* nocapture readonly %a, i8* nocapture %b, i32 %len) {
+define void @bcopy_memmove2(ptr nocapture readonly %a, ptr nocapture %b, i32 %len) {
; CHECK-LABEL: @bcopy_memmove2(
-; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i32(i8* align 1 [[B:%.*]], i8* align 1 [[A:%.*]], i32 [[LEN:%.*]], i1 false)
+; CHECK-NEXT: call void @llvm.memmove.p0.p0.i32(ptr align 1 [[B:%.*]], ptr align 1 [[A:%.*]], i32 [[LEN:%.*]], i1 false)
; CHECK-NEXT: ret void
;
- tail call void @bcopy(i8* %a, i8* %b, i32 %len)
+ tail call void @bcopy(ptr %a, ptr %b, i32 %len)
ret void
}
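; Note on operand order (illustrative, assuming the usual libc signature):
; bcopy takes (src, dst, len) while the memmove intrinsic takes
; (dst, src, len), so the lowering above swaps the first two operands.
declare void @llvm.memmove.p0.p0.i32(ptr, ptr, i32, i1 immarg)
define void @bcopy_as_memmove_sketch(ptr %src, ptr %dst, i32 %len) {
  call void @llvm.memmove.p0.p0.i32(ptr align 1 %dst, ptr align 1 %src, i32 %len, i1 false)
  ret void
}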
ret i8 %r
}
-define { i64, i32 } @ParseRetVal(i1 %b, { i64, i32 } ()* %x) {
+define { i64, i32 } @ParseRetVal(i1 %b, ptr %x) {
; CHECK-LABEL: @ParseRetVal(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[B:%.*]], label [[T:%.*]], label [[F:%.*]]
}
; PR17293
-define <2 x i64> @test7(<2 x i8*>* %arg) nounwind {
+define <2 x i64> @test7(ptr %arg) nounwind {
; CHECK-LABEL: @test7(
-; CHECK-NEXT: [[CAST:%.*]] = bitcast <2 x i8*>* [[ARG:%.*]] to <2 x i64>*
-; CHECK-NEXT: [[LOAD:%.*]] = load <2 x i64>, <2 x i64>* [[CAST]], align 16
+; CHECK-NEXT: [[LOAD:%.*]] = load <2 x i64>, ptr [[ARG:%.*]], align 16
; CHECK-NEXT: ret <2 x i64> [[LOAD]]
;
- %cast = bitcast <2 x i8*>* %arg to <2 x i64>*
- %load = load <2 x i64>, <2 x i64>* %cast, align 16
+ %load = load <2 x i64>, ptr %arg, align 16
ret <2 x i64> %load
}
define void @constant_fold_vector_to_double() {
; CHECK-LABEL: @constant_fold_vector_to_double(
-; CHECK-NEXT: store volatile double 1.000000e+00, double* undef, align 8
-; CHECK-NEXT: store volatile double 1.000000e+00, double* undef, align 8
-; CHECK-NEXT: store volatile double 1.000000e+00, double* undef, align 8
-; CHECK-NEXT: store volatile double 1.000000e+00, double* undef, align 8
-; CHECK-NEXT: store volatile double 0xFFFFFFFFFFFFFFFF, double* undef, align 8
-; CHECK-NEXT: store volatile double 0x162E000004D2, double* undef, align 8
-; CHECK-NEXT: store volatile double bitcast (<2 x i32> <i32 1234, i32 ptrtoint (i32* @g to i32)> to double), double* undef, align 8
-; CHECK-NEXT: store volatile double 0x400000003F800000, double* undef, align 8
-; CHECK-NEXT: store volatile double 0.000000e+00, double* undef, align 8
-; CHECK-NEXT: store volatile double 0.000000e+00, double* undef, align 8
-; CHECK-NEXT: store volatile double 0.000000e+00, double* undef, align 8
-; CHECK-NEXT: store volatile double 0.000000e+00, double* undef, align 8
-; CHECK-NEXT: store volatile double 0.000000e+00, double* undef, align 8
-; CHECK-NEXT: store volatile double 0.000000e+00, double* undef, align 8
+; CHECK-NEXT: store volatile double 1.000000e+00, ptr undef, align 8
+; CHECK-NEXT: store volatile double 1.000000e+00, ptr undef, align 8
+; CHECK-NEXT: store volatile double 1.000000e+00, ptr undef, align 8
+; CHECK-NEXT: store volatile double 1.000000e+00, ptr undef, align 8
+; CHECK-NEXT: store volatile double 0xFFFFFFFFFFFFFFFF, ptr undef, align 8
+; CHECK-NEXT: store volatile double 0x162E000004D2, ptr undef, align 8
+; CHECK-NEXT: store volatile double bitcast (<2 x i32> <i32 1234, i32 ptrtoint (ptr @g to i32)> to double), ptr undef, align 8
+; CHECK-NEXT: store volatile double 0x400000003F800000, ptr undef, align 8
+; CHECK-NEXT: store volatile double 0.000000e+00, ptr undef, align 8
+; CHECK-NEXT: store volatile double 0.000000e+00, ptr undef, align 8
+; CHECK-NEXT: store volatile double 0.000000e+00, ptr undef, align 8
+; CHECK-NEXT: store volatile double 0.000000e+00, ptr undef, align 8
+; CHECK-NEXT: store volatile double 0.000000e+00, ptr undef, align 8
+; CHECK-NEXT: store volatile double 0.000000e+00, ptr undef, align 8
; CHECK-NEXT: ret void
;
- store volatile double bitcast (<1 x i64> <i64 4607182418800017408> to double), double* undef
- store volatile double bitcast (<2 x i32> <i32 0, i32 1072693248> to double), double* undef
- store volatile double bitcast (<4 x i16> <i16 0, i16 0, i16 0, i16 16368> to double), double* undef
- store volatile double bitcast (<8 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 240, i8 63> to double), double* undef
+ store volatile double bitcast (<1 x i64> <i64 4607182418800017408> to double), ptr undef
+ store volatile double bitcast (<2 x i32> <i32 0, i32 1072693248> to double), ptr undef
+ store volatile double bitcast (<4 x i16> <i16 0, i16 0, i16 0, i16 16368> to double), ptr undef
+ store volatile double bitcast (<8 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 240, i8 63> to double), ptr undef
- store volatile double bitcast (<2 x i32> <i32 -1, i32 -1> to double), double* undef
- store volatile double bitcast (<2 x i32> <i32 1234, i32 5678> to double), double* undef
+ store volatile double bitcast (<2 x i32> <i32 -1, i32 -1> to double), ptr undef
+ store volatile double bitcast (<2 x i32> <i32 1234, i32 5678> to double), ptr undef
- store volatile double bitcast (<2 x i32> <i32 1234, i32 ptrtoint (i32* @g to i32)> to double), double* undef
- store volatile double bitcast (<2 x float> <float 1.0, float 2.0> to double), double* undef
+ store volatile double bitcast (<2 x i32> <i32 1234, i32 ptrtoint (ptr @g to i32)> to double), ptr undef
+ store volatile double bitcast (<2 x float> <float 1.0, float 2.0> to double), ptr undef
- store volatile double bitcast (<2 x i32> zeroinitializer to double), double* undef
- store volatile double bitcast (<4 x i16> zeroinitializer to double), double* undef
- store volatile double bitcast (<8 x i8> zeroinitializer to double), double* undef
- store volatile double bitcast (<16 x i4> zeroinitializer to double), double* undef
- store volatile double bitcast (<32 x i2> zeroinitializer to double), double* undef
- store volatile double bitcast (<64 x i1> zeroinitializer to double), double* undef
+ store volatile double bitcast (<2 x i32> zeroinitializer to double), ptr undef
+ store volatile double bitcast (<4 x i16> zeroinitializer to double), ptr undef
+ store volatile double bitcast (<8 x i8> zeroinitializer to double), ptr undef
+ store volatile double bitcast (<16 x i4> zeroinitializer to double), ptr undef
+ store volatile double bitcast (<32 x i2> zeroinitializer to double), ptr undef
+ store volatile double bitcast (<64 x i1> zeroinitializer to double), ptr undef
ret void
}
define void @constant_fold_vector_to_float() {
; CHECK-LABEL: @constant_fold_vector_to_float(
-; CHECK-NEXT: store volatile float 1.000000e+00, float* undef, align 4
-; CHECK-NEXT: store volatile float 1.000000e+00, float* undef, align 4
-; CHECK-NEXT: store volatile float 1.000000e+00, float* undef, align 4
-; CHECK-NEXT: store volatile float 1.000000e+00, float* undef, align 4
+; CHECK-NEXT: store volatile float 1.000000e+00, ptr undef, align 4
+; CHECK-NEXT: store volatile float 1.000000e+00, ptr undef, align 4
+; CHECK-NEXT: store volatile float 1.000000e+00, ptr undef, align 4
+; CHECK-NEXT: store volatile float 1.000000e+00, ptr undef, align 4
; CHECK-NEXT: ret void
;
- store volatile float bitcast (<1 x i32> <i32 1065353216> to float), float* undef
- store volatile float bitcast (<2 x i16> <i16 0, i16 16256> to float), float* undef
- store volatile float bitcast (<4 x i8> <i8 0, i8 0, i8 128, i8 63> to float), float* undef
- store volatile float bitcast (<32 x i1> <i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 0, i1 0> to float), float* undef
+ store volatile float bitcast (<1 x i32> <i32 1065353216> to float), ptr undef
+ store volatile float bitcast (<2 x i16> <i16 0, i16 16256> to float), ptr undef
+ store volatile float bitcast (<4 x i8> <i8 0, i8 0, i8 128, i8 63> to float), ptr undef
+ store volatile float bitcast (<32 x i1> <i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 0, i1 0> to float), ptr undef
ret void
}
define void @constant_fold_vector_to_half() {
; CHECK-LABEL: @constant_fold_vector_to_half(
-; CHECK-NEXT: store volatile half 0xH4000, half* undef, align 2
-; CHECK-NEXT: store volatile half 0xH4000, half* undef, align 2
+; CHECK-NEXT: store volatile half 0xH4000, ptr undef, align 2
+; CHECK-NEXT: store volatile half 0xH4000, ptr undef, align 2
; CHECK-NEXT: ret void
;
- store volatile half bitcast (<2 x i8> <i8 0, i8 64> to half), half* undef
- store volatile half bitcast (<4 x i4> <i4 0, i4 0, i4 0, i4 4> to half), half* undef
+ store volatile half bitcast (<2 x i8> <i8 0, i8 64> to half), ptr undef
+ store volatile half bitcast (<4 x i4> <i4 0, i4 0, i4 0, i4 4> to half), ptr undef
ret void
}
; Ensure that we do not crash when looking at such a weird bitcast.
-define i8* @bitcast_from_single_element_pointer_vector_to_pointer(<1 x i8*> %ptrvec) {
+define ptr @bitcast_from_single_element_pointer_vector_to_pointer(<1 x ptr> %ptrvec) {
; CHECK-LABEL: @bitcast_from_single_element_pointer_vector_to_pointer(
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <1 x i8*> [[PTRVEC:%.*]], i64 0
-; CHECK-NEXT: ret i8* [[TMP1]]
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <1 x ptr> [[PTRVEC:%.*]], i64 0
+; CHECK-NEXT: ret ptr [[TMP1]]
;
- %ptr = bitcast <1 x i8*> %ptrvec to i8*
- ret i8* %ptr
+ %ptr = bitcast <1 x ptr> %ptrvec to ptr
+ ret ptr %ptr
}
@Q = internal unnamed_addr global double 1.000000e+00, align 8
-define double @test(i1 %c, i64* %p) {
+define double @test(i1 %c, ptr %p) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[IF:%.*]], label [[END:%.*]]
; CHECK: if:
-; CHECK-NEXT: [[LOAD1:%.*]] = load double, double* @Q, align 8
+; CHECK-NEXT: [[LOAD1:%.*]] = load double, ptr @Q, align 8
; CHECK-NEXT: br label [[END]]
; CHECK: end:
; CHECK-NEXT: [[TMP0:%.*]] = phi double [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[LOAD1]], [[IF]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i64* [[P:%.*]] to double*
-; CHECK-NEXT: store double [[TMP0]], double* [[TMP1]], align 8
+; CHECK-NEXT: store double [[TMP0]], ptr [[P:%.*]], align 8
; CHECK-NEXT: ret double [[TMP0]]
;
entry:
br i1 %c, label %if, label %end
if:
- %load = load i64, i64* bitcast (double* @Q to i64*), align 8
+ %load = load i64, ptr @Q, align 8
br label %end
end:
%phi = phi i64 [ 0, %entry ], [ %load, %if ]
- store i64 %phi, i64* %p, align 8
+ store i64 %phi, ptr %p, align 8
%cast = bitcast i64 %phi to double
ret double %cast
}
; PR17293
-define <2 x i64> @test7(<2 x i8*>* %arg) nounwind {
+define <2 x i64> @test7(ptr %arg) nounwind {
; CHECK-LABEL: @test7(
-; CHECK-NEXT: [[CAST:%.*]] = bitcast <2 x i8*>* [[ARG:%.*]] to <2 x i64>*
-; CHECK-NEXT: [[LOAD:%.*]] = load <2 x i64>, <2 x i64>* [[CAST]], align 16
+; CHECK-NEXT: [[LOAD:%.*]] = load <2 x i64>, ptr [[ARG:%.*]], align 16
; CHECK-NEXT: ret <2 x i64> [[LOAD]]
;
- %cast = bitcast <2 x i8*>* %arg to <2 x i64>*
- %load = load <2 x i64>, <2 x i64>* %cast, align 16
+ %load = load <2 x i64>, ptr %arg, align 16
ret <2 x i64> %load
}
define void @constant_fold_vector_to_double() {
; CHECK-LABEL: @constant_fold_vector_to_double(
-; CHECK-NEXT: store volatile double 1.000000e+00, double* undef, align 8
-; CHECK-NEXT: store volatile double 1.000000e+00, double* undef, align 8
-; CHECK-NEXT: store volatile double 1.000000e+00, double* undef, align 8
-; CHECK-NEXT: store volatile double 1.000000e+00, double* undef, align 8
-; CHECK-NEXT: store volatile double 0xFFFFFFFFFFFFFFFF, double* undef, align 8
-; CHECK-NEXT: store volatile double 0x162E000004D2, double* undef, align 8
-; CHECK-NEXT: store volatile double bitcast (<2 x i32> <i32 1234, i32 ptrtoint (i32* @g to i32)> to double), double* undef, align 8
-; CHECK-NEXT: store volatile double 0x400000003F800000, double* undef, align 8
-; CHECK-NEXT: store volatile double 0.000000e+00, double* undef, align 8
-; CHECK-NEXT: store volatile double 0.000000e+00, double* undef, align 8
-; CHECK-NEXT: store volatile double 0.000000e+00, double* undef, align 8
-; CHECK-NEXT: store volatile double 0.000000e+00, double* undef, align 8
-; CHECK-NEXT: store volatile double 0.000000e+00, double* undef, align 8
-; CHECK-NEXT: store volatile double 0.000000e+00, double* undef, align 8
+; CHECK-NEXT: store volatile double 1.000000e+00, ptr undef, align 8
+; CHECK-NEXT: store volatile double 1.000000e+00, ptr undef, align 8
+; CHECK-NEXT: store volatile double 1.000000e+00, ptr undef, align 8
+; CHECK-NEXT: store volatile double 1.000000e+00, ptr undef, align 8
+; CHECK-NEXT: store volatile double 0xFFFFFFFFFFFFFFFF, ptr undef, align 8
+; CHECK-NEXT: store volatile double 0x162E000004D2, ptr undef, align 8
+; CHECK-NEXT: store volatile double bitcast (<2 x i32> <i32 1234, i32 ptrtoint (ptr @g to i32)> to double), ptr undef, align 8
+; CHECK-NEXT: store volatile double 0x400000003F800000, ptr undef, align 8
+; CHECK-NEXT: store volatile double 0.000000e+00, ptr undef, align 8
+; CHECK-NEXT: store volatile double 0.000000e+00, ptr undef, align 8
+; CHECK-NEXT: store volatile double 0.000000e+00, ptr undef, align 8
+; CHECK-NEXT: store volatile double 0.000000e+00, ptr undef, align 8
+; CHECK-NEXT: store volatile double 0.000000e+00, ptr undef, align 8
+; CHECK-NEXT: store volatile double 0.000000e+00, ptr undef, align 8
; CHECK-NEXT: ret void
;
- store volatile double bitcast (<1 x i64> <i64 4607182418800017408> to double), double* undef
- store volatile double bitcast (<2 x i32> <i32 0, i32 1072693248> to double), double* undef
- store volatile double bitcast (<4 x i16> <i16 0, i16 0, i16 0, i16 16368> to double), double* undef
- store volatile double bitcast (<8 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 240, i8 63> to double), double* undef
+ store volatile double bitcast (<1 x i64> <i64 4607182418800017408> to double), ptr undef
+ store volatile double bitcast (<2 x i32> <i32 0, i32 1072693248> to double), ptr undef
+ store volatile double bitcast (<4 x i16> <i16 0, i16 0, i16 0, i16 16368> to double), ptr undef
+ store volatile double bitcast (<8 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 240, i8 63> to double), ptr undef
- store volatile double bitcast (<2 x i32> <i32 -1, i32 -1> to double), double* undef
- store volatile double bitcast (<2 x i32> <i32 1234, i32 5678> to double), double* undef
+ store volatile double bitcast (<2 x i32> <i32 -1, i32 -1> to double), ptr undef
+ store volatile double bitcast (<2 x i32> <i32 1234, i32 5678> to double), ptr undef
- store volatile double bitcast (<2 x i32> <i32 1234, i32 ptrtoint (i32* @g to i32)> to double), double* undef
- store volatile double bitcast (<2 x float> <float 1.0, float 2.0> to double), double* undef
+ store volatile double bitcast (<2 x i32> <i32 1234, i32 ptrtoint (ptr @g to i32)> to double), ptr undef
+ store volatile double bitcast (<2 x float> <float 1.0, float 2.0> to double), ptr undef
- store volatile double bitcast (<2 x i32> zeroinitializer to double), double* undef
- store volatile double bitcast (<4 x i16> zeroinitializer to double), double* undef
- store volatile double bitcast (<8 x i8> zeroinitializer to double), double* undef
- store volatile double bitcast (<16 x i4> zeroinitializer to double), double* undef
- store volatile double bitcast (<32 x i2> zeroinitializer to double), double* undef
- store volatile double bitcast (<64 x i1> zeroinitializer to double), double* undef
+ store volatile double bitcast (<2 x i32> zeroinitializer to double), ptr undef
+ store volatile double bitcast (<4 x i16> zeroinitializer to double), ptr undef
+ store volatile double bitcast (<8 x i8> zeroinitializer to double), ptr undef
+ store volatile double bitcast (<16 x i4> zeroinitializer to double), ptr undef
+ store volatile double bitcast (<32 x i2> zeroinitializer to double), ptr undef
+ store volatile double bitcast (<64 x i1> zeroinitializer to double), ptr undef
ret void
}
define void @constant_fold_vector_to_float() {
; CHECK-LABEL: @constant_fold_vector_to_float(
-; CHECK-NEXT: store volatile float 1.000000e+00, float* undef, align 4
-; CHECK-NEXT: store volatile float 1.000000e+00, float* undef, align 4
-; CHECK-NEXT: store volatile float 1.000000e+00, float* undef, align 4
-; CHECK-NEXT: store volatile float 1.000000e+00, float* undef, align 4
+; CHECK-NEXT: store volatile float 1.000000e+00, ptr undef, align 4
+; CHECK-NEXT: store volatile float 1.000000e+00, ptr undef, align 4
+; CHECK-NEXT: store volatile float 1.000000e+00, ptr undef, align 4
+; CHECK-NEXT: store volatile float 1.000000e+00, ptr undef, align 4
; CHECK-NEXT: ret void
;
- store volatile float bitcast (<1 x i32> <i32 1065353216> to float), float* undef
- store volatile float bitcast (<2 x i16> <i16 0, i16 16256> to float), float* undef
- store volatile float bitcast (<4 x i8> <i8 0, i8 0, i8 128, i8 63> to float), float* undef
- store volatile float bitcast (<32 x i1> <i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 0, i1 0> to float), float* undef
+ store volatile float bitcast (<1 x i32> <i32 1065353216> to float), ptr undef
+ store volatile float bitcast (<2 x i16> <i16 0, i16 16256> to float), ptr undef
+ store volatile float bitcast (<4 x i8> <i8 0, i8 0, i8 128, i8 63> to float), ptr undef
+ store volatile float bitcast (<32 x i1> <i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 0, i1 0> to float), ptr undef
ret void
}
define void @constant_fold_vector_to_half() {
; CHECK-LABEL: @constant_fold_vector_to_half(
-; CHECK-NEXT: store volatile half 0xH4000, half* undef, align 2
-; CHECK-NEXT: store volatile half 0xH4000, half* undef, align 2
+; CHECK-NEXT: store volatile half 0xH4000, ptr undef, align 2
+; CHECK-NEXT: store volatile half 0xH4000, ptr undef, align 2
; CHECK-NEXT: ret void
;
- store volatile half bitcast (<2 x i8> <i8 0, i8 64> to half), half* undef
- store volatile half bitcast (<4 x i4> <i4 0, i4 0, i4 0, i4 4> to half), half* undef
+ store volatile half bitcast (<2 x i8> <i8 0, i8 64> to half), ptr undef
+ store volatile half bitcast (<4 x i4> <i4 0, i4 0, i4 0, i4 4> to half), ptr undef
ret void
}
; Ensure that we do not crash when looking at such a weird bitcast.
-define i8* @bitcast_from_single_element_pointer_vector_to_pointer(<1 x i8*> %ptrvec) {
+define ptr @bitcast_from_single_element_pointer_vector_to_pointer(<1 x ptr> %ptrvec) {
; CHECK-LABEL: @bitcast_from_single_element_pointer_vector_to_pointer(
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <1 x i8*> [[PTRVEC:%.*]], i64 0
-; CHECK-NEXT: ret i8* [[TMP1]]
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <1 x ptr> [[PTRVEC:%.*]], i64 0
+; CHECK-NEXT: ret ptr [[TMP1]]
;
- %ptr = bitcast <1 x i8*> %ptrvec to i8*
- ret i8* %ptr
+ %ptr = bitcast <1 x ptr> %ptrvec to ptr
+ ret ptr %ptr
}
declare void @f1()
declare void @f2()
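; With opaque pointers the function-pointer bitcasts of @f1 and @f2 fold
; away completely, so only the bare select of the two functions remains.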
-define i8* @select_bitcast_unsized_pointer(i1 %c) {
+define ptr @select_bitcast_unsized_pointer(i1 %c) {
; CHECK-LABEL: @select_bitcast_unsized_pointer(
-; CHECK-NEXT: [[B:%.*]] = select i1 [[C:%.*]], i8* bitcast (void ()* @f1 to i8*), i8* bitcast (void ()* @f2 to i8*)
-; CHECK-NEXT: ret i8* [[B]]
+; CHECK-NEXT: [[B:%.*]] = select i1 [[C:%.*]], ptr @f1, ptr @f2
+; CHECK-NEXT: ret ptr [[B]]
;
- %s = select i1 %c, void ()* @f1, void ()* @f2
- %b = bitcast void ()* %s to i8*
- ret i8* %b
+ %s = select i1 %c, ptr @f1, ptr @f2
+ ret ptr %s
}
; CHECK: define i32 @fn1
define i32 @fn1() #0 {
entry:
- %b.promoted = load i32, i32* @b, align 4, !tbaa !2
+ %b.promoted = load i32, ptr @b, align 4, !tbaa !2
br label %for.body
for.body: ; preds = %for.body, %entry
br i1 %exitcond, label %for.end, label %for.body
for.end: ; preds = %for.body
- store i32 %or, i32* @b, align 4, !tbaa !2
+ store i32 %or, ptr @b, align 4, !tbaa !2
ret i32 undef
}
ret i4 %cast
}
-define i4 @shuf_load_4bits(<4 x i1> * %p) {
+define i4 @shuf_load_4bits(ptr %p) {
; CHECK-LABEL: @shuf_load_4bits(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i1>* [[P:%.*]] to i4*
-; CHECK-NEXT: [[X1:%.*]] = load i4, i4* [[TMP1]], align 1
+; CHECK-NEXT: [[X1:%.*]] = load i4, ptr [[P:%.*]], align 1
; CHECK-NEXT: [[CAST:%.*]] = call i4 @llvm.bitreverse.i4(i4 [[X1]])
; CHECK-NEXT: ret i4 [[CAST]]
;
- %x = load <4 x i1>, <4 x i1>* %p
+ %x = load <4 x i1>, ptr %p
%bitreverse = shufflevector <4 x i1> %x, <4 x i1> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
%cast = bitcast <4 x i1> %bitreverse to i4
ret i4 %cast
}
; RUN: opt < %s -passes='instcombine,simplifycfg' -simplifycfg-require-and-preserve-domtree=1 -S |\
; RUN: not grep "call void @abort"
-@b_rec.0 = external global i32 ; <i32*> [#uses=2]
+@b_rec.0 = external global i32 ; <ptr> [#uses=2]
-define void @_Z12h000007_testv(i32* %P) {
+define void @_Z12h000007_testv(ptr %P) {
entry:
- %tmp.2 = load i32, i32* @b_rec.0 ; <i32> [#uses=1]
+ %tmp.2 = load i32, ptr @b_rec.0 ; <i32> [#uses=1]
%tmp.9 = or i32 %tmp.2, -989855744 ; <i32> [#uses=2]
%tmp.16 = and i32 %tmp.9, -805306369 ; <i32> [#uses=2]
%tmp.17 = and i32 %tmp.9, -973078529 ; <i32> [#uses=1]
- store i32 %tmp.17, i32* @b_rec.0
+ store i32 %tmp.17, ptr @b_rec.0
%tmp.17.shrunk = bitcast i32 %tmp.16 to i32 ; <i32> [#uses=1]
%tmp.22 = and i32 %tmp.17.shrunk, -1073741824 ; <i32> [#uses=1]
%tmp.23 = icmp eq i32 %tmp.22, -1073741824 ; <i1> [#uses=1]
endif.0: ; preds = %entry
%tmp.17.shrunk2 = bitcast i32 %tmp.16 to i32 ; <i32> [#uses=1]
%tmp.27.mask = and i32 %tmp.17.shrunk2, 100663295 ; <i32> [#uses=1]
- store i32 %tmp.27.mask, i32* %P
+ store i32 %tmp.27.mask, ptr %P
ret void
}
; CHECK: patatino:
; CHECK-NEXT: ret i32 [[X:%.*]]
;
- %y = icmp eq i32 27, ptrtoint(i8* @global to i32)
+ %y = icmp eq i32 27, ptrtoint (ptr @global to i32)
br i1 %y, label %patatino, label %patatino
patatino:
ret i32 %x
}
; negative test - uses
-define i32 @lshr8_i32_use(i32 %x, i32* %p) {
+define i32 @lshr8_i32_use(i32 %x, ptr %p) {
; CHECK-LABEL: @lshr8_i32_use(
; CHECK-NEXT: [[S:%.*]] = lshr i32 [[X:%.*]], 12
-; CHECK-NEXT: store i32 [[S]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[S]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.bswap.i32(i32 [[S]])
; CHECK-NEXT: ret i32 [[R]]
;
%s = lshr i32 %x, 12
- store i32 %s, i32* %p
+ store i32 %s, ptr %p
%r = call i32 @llvm.bswap.i32(i32 %s)
ret i32 %r
}
; negative test - uses
-define i32 @shl8_i32_use(i32 %x, i32* %p) {
+define i32 @shl8_i32_use(i32 %x, ptr %p) {
; CHECK-LABEL: @shl8_i32_use(
; CHECK-NEXT: [[S:%.*]] = shl i32 [[X:%.*]], 8
-; CHECK-NEXT: store i32 [[S]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[S]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.bswap.i32(i32 [[S]])
; CHECK-NEXT: ret i32 [[R]]
;
%s = shl i32 %x, 8
- store i32 %s, i32* %p
+ store i32 %s, ptr %p
%r = call i32 @llvm.bswap.i32(i32 %s)
ret i32 %r
}
ret i32 %cast
}
-define i32 @shuf_load_4bytes(<4 x i8>* %p) {
+define i32 @shuf_load_4bytes(ptr %p) {
; CHECK-LABEL: @shuf_load_4bytes(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i8>* [[P:%.*]] to i32*
-; CHECK-NEXT: [[X1:%.*]] = load i32, i32* [[TMP1]], align 4
+; CHECK-NEXT: [[X1:%.*]] = load i32, ptr [[P:%.*]], align 4
; CHECK-NEXT: [[CAST:%.*]] = call i32 @llvm.bswap.i32(i32 [[X1]])
; CHECK-NEXT: ret i32 [[CAST]]
;
- %x = load <4 x i8>, <4 x i8>* %p
+ %x = load <4 x i8>, ptr %p
%bswap = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 undef, i32 0>
%cast = bitcast <4 x i8> %bswap to i32
ret i32 %cast
}
ret i32 %cast
}
-define i32 @shuf_load_4bytes(<4 x i8>* %p) {
+define i32 @shuf_load_4bytes(ptr %p) {
; CHECK-LABEL: @shuf_load_4bytes(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i8>* [[P:%.*]] to i32*
-; CHECK-NEXT: [[X1:%.*]] = load i32, i32* [[TMP1]], align 4
+; CHECK-NEXT: [[X1:%.*]] = load i32, ptr [[P:%.*]], align 4
; CHECK-NEXT: [[CAST:%.*]] = call i32 @llvm.bswap.i32(i32 [[X1]])
; CHECK-NEXT: ret i32 [[CAST]]
;
- %x = load <4 x i8>, <4 x i8>* %p
+ %x = load <4 x i8>, ptr %p
%bswap = shufflevector <4 x i8> %x, <4 x i8> undef, <4 x i32> <i32 3, i32 2, i32 undef, i32 0>
%cast = bitcast <4 x i8> %bswap to i32
ret i32 %cast
}
; Don't attempt to collectBitParts from >128 bit integers
-define i16 @trunc_bswap_i160(i160* %a0) {
+define i16 @trunc_bswap_i160(ptr %a0) {
; CHECK-LABEL: @trunc_bswap_i160(
-; CHECK-NEXT: [[LOAD:%.*]] = load i160, i160* [[A0:%.*]], align 4
+; CHECK-NEXT: [[LOAD:%.*]] = load i160, ptr [[A0:%.*]], align 4
; CHECK-NEXT: [[LSHR1:%.*]] = lshr i160 [[LOAD]], 136
; CHECK-NEXT: [[CAST1:%.*]] = trunc i160 [[LSHR1]] to i16
; CHECK-NEXT: [[AND1:%.*]] = and i16 [[CAST1]], 255
; CHECK-NEXT: [[OR:%.*]] = or i16 [[AND1]], [[SHL]]
; CHECK-NEXT: ret i16 [[OR]]
;
- %load = load i160, i160* %a0, align 4
+ %load = load i160, ptr %a0, align 4
%lshr0 = lshr i160 %load, 128
%lshr1 = lshr i160 %load, 136
%cast0 = trunc i160 %lshr0 to i16
; Function Attrs: nounwind ssp uwtable
define i64 @weird_identity_but_ok(i64 %sz) {
entry:
- %call = tail call i8* @malloc(i64 %sz)
- %calc_size = tail call i64 @llvm.objectsize.i64.p0i8(i8* %call, i1 false, i1 true, i1 true)
- tail call void @free(i8* %call)
+ %call = tail call ptr @malloc(i64 %sz)
+ %calc_size = tail call i64 @llvm.objectsize.i64.p0(ptr %call, i1 false, i1 true, i1 true)
+ tail call void @free(ptr %call)
ret i64 %calc_size
}
br i1 %which, label %first_label, label %second_label
first_label:
- %first_call = call i8* @malloc(i64 10)
+ %first_call = call ptr @malloc(i64 10)
br label %join_label
second_label:
- %second_call = call i8* @malloc(i64 30)
+ %second_call = call ptr @malloc(i64 30)
br label %join_label
join_label:
- %joined = phi i8* [ %first_call, %first_label ], [ %second_call, %second_label ]
- %calc_size = tail call i64 @llvm.objectsize.i64.p0i8(i8* %joined, i1 false, i1 true, i1 true)
+ %joined = phi ptr [ %first_call, %first_label ], [ %second_call, %second_label ]
+ %calc_size = tail call i64 @llvm.objectsize.i64.p0(ptr %joined, i1 false, i1 true, i1 true)
ret i64 %calc_size
}
define i64 @internal_pointer(i64 %sz) {
entry:
- %ptr = call i8* @malloc(i64 %sz)
- %ptr2 = getelementptr inbounds i8, i8* %ptr, i32 2
- %calc_size = call i64 @llvm.objectsize.i64.p0i8(i8* %ptr2, i1 false, i1 true, i1 true)
+ %ptr = call ptr @malloc(i64 %sz)
+ %ptr2 = getelementptr inbounds i8, ptr %ptr, i32 2
+ %calc_size = call i64 @llvm.objectsize.i64.p0(ptr %ptr2, i1 false, i1 true, i1 true)
ret i64 %calc_size
}
define i64 @uses_nullptr_no_fold() {
entry:
- %res = call i64 @llvm.objectsize.i64.p0i8(i8* null, i1 false, i1 true, i1 true)
+ %res = call i64 @llvm.objectsize.i64.p0(ptr null, i1 false, i1 true, i1 true)
ret i64 %res
}
-; CHECK: %res = call i64 @llvm.objectsize.i64.p0i8(i8* null, i1 false, i1 true, i1 true)
+; CHECK: %res = call i64 @llvm.objectsize.i64.p0(ptr null, i1 false, i1 true, i1 true)
define i64 @uses_nullptr_fold() {
entry:
; NOTE: the third parameter to this call is false, unlike above.
- %res = call i64 @llvm.objectsize.i64.p0i8(i8* null, i1 false, i1 false, i1 true)
+ %res = call i64 @llvm.objectsize.i64.p0(ptr null, i1 false, i1 false, i1 true)
ret i64 %res
}
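; With null_is_unknown_size == false, a null pointer in address space 0 has
; a known object size of 0, so this call can be constant-folded away.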
; Function Attrs: nounwind
define void @f() {
entry:
- %.pr = load i32, i32* @c, align 4
+ %.pr = load i32, ptr @c, align 4
%tobool4 = icmp eq i32 %.pr, 0
br i1 %tobool4, label %for.end, label %for.body
for.body: ; preds = %entry, %for.body
- %dp.05 = phi i8* [ %add.ptr, %for.body ], [ @d, %entry ]
- %0 = tail call i64 @llvm.objectsize.i64.p0i8(i8* %dp.05, i1 false, i1 true, i1 true)
+ %dp.05 = phi ptr [ %add.ptr, %for.body ], [ @d, %entry ]
+ %0 = tail call i64 @llvm.objectsize.i64.p0(ptr %dp.05, i1 false, i1 true, i1 true)
%conv = trunc i64 %0 to i32
tail call void @bury(i32 %conv) #3
- %1 = load i32, i32* @c, align 4
+ %1 = load i32, ptr @c, align 4
%idx.ext = sext i32 %1 to i64
%add.ptr.offs = add i64 %idx.ext, 0
%2 = add i64 undef, %add.ptr.offs
- %add.ptr = getelementptr inbounds i8, i8* %dp.05, i64 %idx.ext
+ %add.ptr = getelementptr inbounds i8, ptr %dp.05, i64 %idx.ext
%add = shl nsw i32 %1, 1
- store i32 %add, i32* @c, align 4
+ store i32 %add, ptr @c, align 4
%tobool = icmp eq i32 %1, 0
br i1 %tobool, label %for.end, label %for.body
}
; CHECK: define void @f()
-; CHECK: call i64 @llvm.objectsize.i64.p0i8(
+; CHECK: call i64 @llvm.objectsize.i64.p0(
define void @bdos_cmpm1(i64 %alloc) {
entry:
- %obj = call i8* @malloc(i64 %alloc)
- %objsize = call i64 @llvm.objectsize.i64.p0i8(i8* %obj, i1 0, i1 0, i1 1)
+ %obj = call ptr @malloc(i64 %alloc)
+ %objsize = call i64 @llvm.objectsize.i64.p0(ptr %obj, i1 0, i1 0, i1 1)
%cmp.not = icmp eq i64 %objsize, -1
br i1 %cmp.not, label %if.else, label %if.then
if.then:
- call void @fortified_chk(i8* %obj, i64 %objsize)
+ call void @fortified_chk(ptr %obj, i64 %objsize)
br label %if.end
if.else:
- call void @unfortified(i8* %obj, i64 %objsize)
+ call void @unfortified(ptr %obj, i64 %objsize)
br label %if.end
if.end: ; preds = %if.else, %if.then
; CHECK: [[TMP:%.*]] = icmp ne i64 %alloc, -1
; CHECK-NEXT: call void @llvm.assume(i1 [[TMP]])
; CHECK-NEXT: br i1 false, label %if.else, label %if.then
-; CHECK: call void @fortified_chk(i8* %obj, i64 %alloc)
+; CHECK: call void @fortified_chk(ptr %obj, i64 %alloc)
define void @bdos_cmpm1_expr(i64 %alloc, i64 %part) {
entry:
%sz = udiv i64 %alloc, %part
- %obj = call i8* @malloc(i64 %sz)
- %objsize = call i64 @llvm.objectsize.i64.p0i8(i8* %obj, i1 0, i1 0, i1 1)
+ %obj = call ptr @malloc(i64 %sz)
+ %objsize = call i64 @llvm.objectsize.i64.p0(ptr %obj, i1 0, i1 0, i1 1)
%cmp.not = icmp eq i64 %objsize, -1
br i1 %cmp.not, label %if.else, label %if.then
if.then:
- call void @fortified_chk(i8* %obj, i64 %objsize)
+ call void @fortified_chk(ptr %obj, i64 %objsize)
br label %if.end
if.else:
- call void @unfortified(i8* %obj, i64 %objsize)
+ call void @unfortified(ptr %obj, i64 %objsize)
br label %if.end
if.end: ; preds = %if.else, %if.then
; CHECK: [[TMP:%.*]] = icmp ne i64 [[SZ:%.*]], -1
; CHECK-NEXT: call void @llvm.assume(i1 [[TMP]])
; CHECK-NEXT: br i1 false, label %if.else, label %if.then
-; CHECK: call void @fortified_chk(i8* %obj, i64 [[SZ]])
+; CHECK: call void @fortified_chk(ptr %obj, i64 [[SZ]])
@p7 = internal addrspace(7) global i8 0
; CHECK: ret i64 [[TMP1]]
;
entry:
- %p0 = tail call i8* @malloc(i64 64)
- %gep = getelementptr i8, i8 addrspace(7)* @p7, i32 1
- %as = addrspacecast i8 addrspace(7)* %gep to i8*
- %select = select i1 %c, i8* %p0, i8* %as
- %calc_size = tail call i64 @llvm.objectsize.i64.p0i8(i8* %select, i1 false, i1 true, i1 true)
+ %p0 = tail call ptr @malloc(i64 64)
+ %gep = getelementptr i8, ptr addrspace(7) @p7, i32 1
+ %as = addrspacecast ptr addrspace(7) %gep to ptr
+ %select = select i1 %c, ptr %p0, ptr %as
+ %calc_size = tail call i64 @llvm.objectsize.i64.p0(ptr %select, i1 false, i1 true, i1 true)
ret i64 %calc_size
}
; CHECK: ret i64 [[TMP1]]
;
entry:
- %p0 = tail call i8* @malloc(i64 64)
- %select = select i1 %c, i8* %p0, i8* addrspacecast (i8 addrspace(7)* getelementptr (i8, i8 addrspace(7)* @p7, i32 1) to i8*)
- %calc_size = tail call i64 @llvm.objectsize.i64.p0i8(i8* %select, i1 false, i1 true, i1 true)
+ %p0 = tail call ptr @malloc(i64 64)
+ %select = select i1 %c, ptr %p0, ptr addrspacecast (ptr addrspace(7) getelementptr (i8, ptr addrspace(7) @p7, i32 1) to ptr)
+ %calc_size = tail call i64 @llvm.objectsize.i64.p0(ptr %select, i1 false, i1 true, i1 true)
ret i64 %calc_size
}
declare void @bury(i32) local_unnamed_addr #2
; Function Attrs: nounwind allocsize(0)
-declare i8* @malloc(i64) nounwind allocsize(0) allockind("alloc,uninitialized") "alloc-family"="malloc"
+declare ptr @malloc(i64) nounwind allocsize(0) allockind("alloc,uninitialized") "alloc-family"="malloc"
-declare i8* @get_unknown_buffer()
+declare ptr @get_unknown_buffer()
; Function Attrs: nounwind
-declare void @free(i8* nocapture) nounwind allockind("free") "alloc-family"="malloc"
+declare void @free(ptr nocapture) nounwind allockind("free") "alloc-family"="malloc"
; Function Attrs: nounwind readnone speculatable
-declare i64 @llvm.objectsize.i64.p0i8(i8*, i1, i1, i1)
+declare i64 @llvm.objectsize.i64.p0(ptr, i1, i1, i1)
-declare void @fortified_chk(i8*, i64)
+declare void @fortified_chk(ptr, i64)
-declare void @unfortified(i8*, i64)
+declare void @unfortified(ptr, i64)
; check that memory builtins can be handled.
define i64 @objsize1_custom_idx(i64 %sz) {
entry:
- %ptr = call i8* @malloc(i64 %sz)
- %ptr2 = getelementptr inbounds i8, i8* %ptr, i32 2
- %calc_size = call i64 @llvm.objectsize.i64.p0i8(i8* %ptr2, i1 false, i1 true, i1 true)
+ %ptr = call ptr @malloc(i64 %sz)
+ %ptr2 = getelementptr inbounds i8, ptr %ptr, i32 2
+ %calc_size = call i64 @llvm.objectsize.i64.p0(ptr %ptr2, i1 false, i1 true, i1 true)
ret i64 %calc_size
}
define i32 @objsize2_custom_idx() #0 {
entry:
%var = alloca %struct.V, align 4
- %0 = bitcast %struct.V* %var to i8*
- call void @llvm.lifetime.start.p0i8(i64 28, i8* %0) #3
- %buf1 = getelementptr inbounds %struct.V, %struct.V* %var, i32 0, i32 0
- %arrayidx = getelementptr inbounds [10 x i8], [10 x i8]* %buf1, i64 0, i64 1
- %1 = call i64 @llvm.objectsize.i64.p0i8(i8* %arrayidx, i1 false, i1 false, i1 false)
- %conv = trunc i64 %1 to i32
- call void @llvm.lifetime.end.p0i8(i64 28, i8* %0) #3
+ call void @llvm.lifetime.start.p0(i64 28, ptr %var) #3
+ %arrayidx = getelementptr inbounds [10 x i8], ptr %var, i64 0, i64 1
+ %0 = call i64 @llvm.objectsize.i64.p0(ptr %arrayidx, i1 false, i1 false, i1 false)
+ %conv = trunc i64 %0 to i32
+ call void @llvm.lifetime.end.p0(i64 28, ptr %var) #3
ret i32 %conv
; CHECK: ret i32 27
}
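; %struct.V occupies 28 bytes (see the lifetime markers) and %arrayidx
; points 1 byte past its start, so at most 27 bytes remain, hence the
; folded result of 27 above.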
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
-declare i8* @malloc(i64)
-declare i64 @llvm.objectsize.i64.p0i8(i8*, i1, i1, i1)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare ptr @malloc(i64)
+declare i64 @llvm.objectsize.i64.p0(ptr, i1, i1, i1)
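; Both select arms below leave exactly 10 addressable bytes (%Big at byte
; offset 10 of 20, %Small at offset 0 of 10), so the objectsize folds to 10
; regardless of which way the select goes.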
entry:
%Big = alloca [20 x i8], align 16
%Small = alloca [10 x i8], align 1
- %0 = getelementptr inbounds [20 x i8], [20 x i8]* %Big, i64 0, i64 0
- call void @llvm.lifetime.start.p0i8(i64 20, i8* %0)
- %1 = getelementptr inbounds [10 x i8], [10 x i8]* %Small, i64 0, i64 0
- call void @llvm.lifetime.start.p0i8(i64 10, i8* %1)
+ call void @llvm.lifetime.start.p0(i64 20, ptr %Big)
+ call void @llvm.lifetime.start.p0(i64 10, ptr %Small)
%tobool = icmp ne i32 %N, 0
- %add.ptr = getelementptr inbounds [20 x i8], [20 x i8]* %Big, i64 0, i64 10
- %cond = select i1 %tobool, i8* %add.ptr, i8* %1
- %2 = call i64 @llvm.objectsize.i64.p0i8(i8* %cond, i1 false)
- %conv = trunc i64 %2 to i32
- call void @llvm.lifetime.end.p0i8(i64 10, i8* %1)
- call void @llvm.lifetime.end.p0i8(i64 20, i8* %0)
+ %add.ptr = getelementptr inbounds [20 x i8], ptr %Big, i64 0, i64 10
+ %cond = select i1 %tobool, ptr %add.ptr, ptr %Small
+ %0 = call i64 @llvm.objectsize.i64.p0(ptr %cond, i1 false)
+ %conv = trunc i64 %0 to i32
+ call void @llvm.lifetime.end.p0(i64 10, ptr %Small)
+ call void @llvm.lifetime.end.p0(i64 20, ptr %Big)
ret i32 %conv
; CHECK: ret i32 10
}
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare i64 @llvm.objectsize.i64.p0i8(i8*, i1)
+declare i64 @llvm.objectsize.i64.p0(ptr, i1)
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
define void @foo() {
entry:
%call = tail call i32 @foo1(i32 0)
%conv = sext i32 %call to i64
- %call1 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i64 0, i64 0), i64 %conv)
+ %call1 = tail call i32 (ptr, ...) @printf(ptr @.str, i64 %conv)
ret void
}
-declare i32 @printf(i8* nocapture readonly, ...)
+declare i32 @printf(ptr nocapture readonly, ...)
; CHECK-NEXT: ret i32 27
;
%var = alloca %struct.V, align 4
- %t0 = bitcast %struct.V* %var to i8*
- call void @llvm.lifetime.start.p0i8(i64 28, i8* %t0) #3
- %buf1 = getelementptr inbounds %struct.V, %struct.V* %var, i32 0, i32 0
- %arrayidx = getelementptr inbounds [10 x i8], [10 x i8]* %buf1, i64 0, i64 1
- %t1 = call i64 @llvm.objectsize.i64.p0i8(i8* %arrayidx, i1 false)
+ call void @llvm.lifetime.start.p0(i64 28, ptr %var) #3
+ %arrayidx = getelementptr inbounds [10 x i8], ptr %var, i64 0, i64 1
+ %t1 = call i64 @llvm.objectsize.i64.p0(ptr %arrayidx, i1 false)
%conv = trunc i64 %t1 to i32
- call void @llvm.lifetime.end.p0i8(i64 28, i8* %t0) #3
+ call void @llvm.lifetime.end.p0(i64 28, ptr %var) #3
ret i32 %conv
}
; CHECK-NEXT: ret void
;
%tab = alloca [10 x i8], align 16
- %t0 = bitcast [10 x i8]* %tab to i8*
- call void @llvm.memset.p0i8.i64(i8* align 16 %t0, i8 9, i64 10, i1 false)
- %t1 = call {}* @llvm.invariant.start.p0i8(i64 10, i8* align 16 %t0)
- call void @llvm.invariant.end.p0i8({}* %t1, i64 10, i8* align 16 %t0)
+ call void @llvm.memset.p0.i64(ptr align 16 %tab, i8 9, i64 10, i1 false)
+ %t1 = call ptr @llvm.invariant.start.p0(i64 10, ptr align 16 %tab)
+ call void @llvm.invariant.end.p0(ptr %t1, i64 10, ptr align 16 %tab)
ret void
- uselistorder i8* %t0, { 1, 0, 2 }
+ uselistorder ptr %tab, { 1, 0, 2 }
}
-define void @unknown_use_of_invariant_start({}** %p) {
+define void @unknown_use_of_invariant_start(ptr %p) {
; CHECK-LABEL: @unknown_use_of_invariant_start(
; CHECK-NEXT: ret void
;
%tab = alloca [10 x i8], align 16
- %t0 = bitcast [10 x i8]* %tab to i8*
- call void @llvm.memset.p0i8.i64(i8* align 16 %t0, i8 9, i64 10, i1 false)
- %t1 = call {}* @llvm.invariant.start.p0i8(i64 10, i8* align 16 %t0)
- call void @llvm.invariant.end.p0i8({}* %t1, i64 10, i8* align 16 %t0)
- store {}* %t1, {}** %p
+ call void @llvm.memset.p0.i64(ptr align 16 %tab, i8 9, i64 10, i1 false)
+ %t1 = call ptr @llvm.invariant.start.p0(i64 10, ptr align 16 %tab)
+ call void @llvm.invariant.end.p0(ptr %t1, i64 10, ptr align 16 %tab)
+ store ptr %t1, ptr %p
ret void
}
-define {}* @minimal_invariant_start_use(i8 %x) {
+define ptr @minimal_invariant_start_use(i8 %x) {
; CHECK-LABEL: @minimal_invariant_start_use(
-; CHECK-NEXT: ret {}* poison
+; CHECK-NEXT: ret ptr poison
;
%a = alloca i8
- %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %a)
- ret {}* %i
+ %i = call ptr @llvm.invariant.start.p0(i64 1, ptr %a)
+ ret ptr %i
}
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
-declare i64 @llvm.objectsize.i64.p0i8(i8*, i1) #2
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg) #0
-declare {}* @llvm.invariant.start.p0i8(i64 immarg, i8* nocapture) #0
-declare void @llvm.invariant.end.p0i8({}*, i64 immarg, i8* nocapture) #0
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare i64 @llvm.objectsize.i64.p0(ptr, i1) #2
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #0
+declare ptr @llvm.invariant.start.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.invariant.end.p0(ptr, i64 immarg, ptr nocapture) #0
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-declare dso_local noalias noundef i8* @malloc(i64 noundef) local_unnamed_addr
-declare i64 @llvm.objectsize.i64.p0i8(i8*, i1 immarg, i1 immarg, i1 immarg)
-declare noalias i8* @strdup(i8*);
-declare noalias i8* @__strdup(i8*);
-declare noalias i8* @strndup(i8*, i64);
-declare noalias i8* @__strndup(i8*, i64);
+declare dso_local noalias noundef ptr @malloc(i64 noundef) local_unnamed_addr
+declare i64 @llvm.objectsize.i64.p0(ptr, i1 immarg, i1 immarg, i1 immarg)
+declare noalias ptr @strdup(ptr);
+declare noalias ptr @__strdup(ptr);
+declare noalias ptr @strndup(ptr, i64);
+declare noalias ptr @__strndup(ptr, i64);
@str = dso_local constant [11 x i8] c"toulbroc'h\00"
; CHECK-LABEL: @check_strdup(
; CHECK-NEXT: ret i64 11
;
- %ptr = call noalias i8* @strdup(i8* noundef getelementptr inbounds ([11 x i8], [11 x i8]* @str, i64 0, i64 0))
- %size = call i64 @llvm.objectsize.i64.p0i8(i8* %ptr, i1 false, i1 true, i1 false)
+ %ptr = call noalias ptr @strdup(ptr noundef @str)
+ %size = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 false, i1 true, i1 false)
ret i64 %size
}
; CHECK-LABEL: @check_dunder_strdup(
; CHECK-NEXT: ret i64 11
;
- %ptr = call noalias i8* @__strdup(i8* noundef getelementptr inbounds ([11 x i8], [11 x i8]* @str, i64 0, i64 0))
- %size = call i64 @llvm.objectsize.i64.p0i8(i8* %ptr, i1 false, i1 true, i1 false)
+ %ptr = call noalias ptr @__strdup(ptr noundef @str)
+ %size = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 false, i1 true, i1 false)
ret i64 %size
}
; CHECK-LABEL: @check_strndup(
; CHECK-NEXT: ret i64 5
;
- %ptr = call noalias i8* @strndup(i8* noundef getelementptr inbounds ([11 x i8], [11 x i8]* @str, i64 0, i64 0), i64 4)
- %size = call i64 @llvm.objectsize.i64.p0i8(i8* %ptr, i1 false, i1 true, i1 false)
+ %ptr = call noalias ptr @strndup(ptr noundef @str, i64 4)
+ %size = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 false, i1 true, i1 false)
ret i64 %size
}
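; strndup(@str, 4) yields at most 4 characters plus the terminating NUL,
; so the object size folds to 5.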
; CHECK-LABEL: @check_dunder_strndup(
; CHECK-NEXT: ret i64 5
;
- %ptr = call noalias i8* @__strndup(i8* noundef getelementptr inbounds ([11 x i8], [11 x i8]* @str, i64 0, i64 0), i64 4)
- %size = call i64 @llvm.objectsize.i64.p0i8(i8* %ptr, i1 false, i1 true, i1 false)
+ %ptr = call noalias ptr @__strndup(ptr noundef @str, i64 4)
+ %size = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 false, i1 true, i1 false)
ret i64 %size
}
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
; Verify that a cdecl-compatible calling convention does not trigger emitting
-; unreachable idom `store i1 true, i1* undef`.
+; unreachable idom `store i1 true, ptr undef`.
-define arm_aapcs_vfpcc i8 @bar(i8* %0) {
+define arm_aapcs_vfpcc i8 @bar(ptr %0) {
; CHECK-LABEL: @bar(
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, i8* [[TMP0:%.*]], align 1
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP0:%.*]], align 1
; CHECK-NEXT: ret i8 [[TMP2]]
;
- %2 = load i8, i8* %0, align 1
+ %2 = load i8, ptr %0, align 1
ret i8 %2
}
-define dso_local arm_aapcs_vfpcc i8 @foo(i8* %0) {
+define dso_local arm_aapcs_vfpcc i8 @foo(ptr %0) {
; CHECK-LABEL: @foo(
-; CHECK-NEXT: [[TMP2:%.*]] = call i8 @bar(i8* [[TMP0:%.*]])
+; CHECK-NEXT: [[TMP2:%.*]] = call i8 @bar(ptr [[TMP0:%.*]])
; CHECK-NEXT: ret i8 [[TMP2]]
;
- %2 = call i8 @bar(i8* %0)
+ %2 = call i8 @bar(ptr %0)
ret i8 %2
}
; CHECK-LABEL: @_strlen1(
; CHECK-NEXT: ret i32 3
;
- %call = tail call arm_aapcscc i32 @strlen(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0))
+ %call = tail call arm_aapcscc i32 @strlen(ptr @.str)
ret i32 %call
}
-declare arm_aapcscc i32 @strlen(i8*)
+declare arm_aapcscc i32 @strlen(ptr)
-define arm_aapcscc zeroext i1 @_strlen2(i8* %str) {
+define arm_aapcscc zeroext i1 @_strlen2(ptr %str) {
; CHECK-LABEL: @_strlen2(
-; CHECK-NEXT: [[STRLENFIRST:%.*]] = load i8, i8* [[STR:%.*]], align 1
+; CHECK-NEXT: [[STRLENFIRST:%.*]] = load i8, ptr [[STR:%.*]], align 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i8 [[STRLENFIRST]], 0
; CHECK-NEXT: ret i1 [[CMP]]
;
- %call = tail call arm_aapcscc i32 @strlen(i8* %str)
+ %call = tail call arm_aapcscc i32 @strlen(ptr %str)
%cmp = icmp ne i32 %call, 0
ret i1 %cmp
}
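; strlen(%str) != 0 only asks whether the string is empty, so it folds to a
; load and compare of the first byte, as the CHECK lines above show.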
; RUN: opt < %s -passes=instcombine -data-layout="p:32:32" -S | FileCheck %s --check-prefixes=CHECK,CHECK32
; RUN: opt < %s -passes=instcombine -data-layout="p:64:64" -S | FileCheck %s --check-prefixes=CHECK,CHECK64
-define signext i32 @b(i32* inreg %x) {
+define signext i32 @b(ptr inreg %x) {
ret i32 0
}
ret void
}
-define void @g(i32* %y) {
- call i32 bitcast (i32 (i32*)* @b to i32 (i32)*)(i32 zeroext 0)
- call void bitcast (void (...)* @c to void (i32*)*)(i32* %y)
- call void bitcast (void (...)* @c to void (i32*)*)(i32* sret(i32) %y)
- call void bitcast (void (i32, ...)* @d to void (i32, i32*)*)(i32 0, i32* sret(i32) %y)
- call void bitcast (void (i32, ...)* @d to void (i32, i32*)*)(i32 0, i32* nocapture %y)
- call void bitcast (void (i32, ...)* @d to void (i32*)*)(i32* nocapture noundef %y)
+define void @g(ptr %y) {
+ call i32 @b(i32 zeroext 0)
+ call void @c(ptr %y)
+ call void @c(ptr sret(i32) %y)
+ call void @d(i32 0, ptr sret(i32) %y)
+ call void @d(i32 0, ptr nocapture %y)
+ call void @d(ptr nocapture noundef %y)
ret void
}
-; CHECK-LABEL: define void @g(i32* %y)
-; CHECK: call i32 bitcast (i32 (i32*)* @b to i32 (i32)*)(i32 zeroext 0)
-; CHECK: call void (...) @c(i32* %y)
-; CHECK: call void bitcast (void (...)* @c to void (i32*)*)(i32* sret(i32) %y)
-; CHECK: call void bitcast (void (i32, ...)* @d to void (i32, i32*)*)(i32 0, i32* sret(i32) %y)
-; CHECK: call void (i32, ...) @d(i32 0, i32* nocapture %y)
-; CHECK32: %2 = ptrtoint i32* %y to i32
+; CHECK-LABEL: define void @g(ptr %y)
+; CHECK: call i32 @b(i32 zeroext 0)
+; CHECK: call void (...) @c(ptr %y)
+; CHECK: call void @c(ptr sret(i32) %y)
+; CHECK: call void @d(i32 0, ptr sret(i32) %y)
+; CHECK: call void (i32, ...) @d(i32 0, ptr nocapture %y)
+; CHECK32: %2 = ptrtoint ptr %y to i32
; CHECK32: call void (i32, ...) @d(i32 noundef %2)
-; CHECK64: call void bitcast (void (i32, ...)* @d to void (i32*)*)(i32* nocapture noundef %y)
+; CHECK64: call void @d(ptr nocapture noundef %y)
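; With 32-bit pointers the ptr argument can be bridged to @d's declared i32
; parameter via ptrtoint, but with 64-bit pointers that conversion would be
; lossy, so the mismatched call is left untouched (CHECK32 vs. CHECK64).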
define i32 @main() {
; CHECK-LABEL: @main
-; CHECK: %[[call:.*]] = call i8* @ctime(i32* null)
-; CHECK: %[[cast:.*]] = ptrtoint i8* %[[call]] to i32
+; CHECK: %[[call:.*]] = call ptr @ctime(ptr null)
+; CHECK: %[[cast:.*]] = ptrtoint ptr %[[call]] to i32
; CHECK: ret i32 %[[cast]]
entry:
- %tmp = call i32 bitcast (i8* (i32*)* @ctime to i32 (i32*)*)( i32* null ) ; <i32> [#uses=1]
+ %tmp = call i32 @ctime( ptr null ) ; <i32> [#uses=1]
ret i32 %tmp
}
-declare i8* @ctime(i32*)
+declare ptr @ctime(ptr)
-define internal { i8 } @foo(i32*) {
+define internal { i8 } @foo(ptr) {
entry:
ret { i8 } { i8 0 }
}
; CHECK-LABEL: @test_struct_ret
; CHECK-NOT: bitcast
entry:
- %0 = call { i8 } bitcast ({ i8 } (i32*)* @foo to { i8 } (i16*)*)(i16* null)
+ %0 = call { i8 } @foo(ptr null)
ret void
}
declare i32 @fn1(i32)
-define i32 @test1(i32* %a) {
+define i32 @test1(ptr %a) {
; CHECK-LABEL: @test1
-; CHECK: %[[cast:.*]] = ptrtoint i32* %a to i32
+; CHECK: %[[cast:.*]] = ptrtoint ptr %a to i32
; CHECK-NEXT: %[[call:.*]] = tail call i32 @fn1(i32 %[[cast]])
; CHECK-NEXT: ret i32 %[[call]]
entry:
- %call = tail call i32 bitcast (i32 (i32)* @fn1 to i32 (i32*)*)(i32* %a)
+ %call = tail call i32 @fn1(ptr %a)
ret i32 %call
}
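; The CHECK lines show the ptr argument being bridged to @fn1's declared i32
; parameter with a ptrtoint (presumably 32-bit pointers in this file's data
; layout); the i16/i64 mismatches in the tests below cannot be bridged
; losslessly, so those calls are kept as-is.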
declare i32 @fn2(i16)
-define i32 @test2(i32* %a) {
+define i32 @test2(ptr %a) {
; CHECK-LABEL: @test2
-; CHECK: %[[call:.*]] = tail call i32 bitcast (i32 (i16)* @fn2 to i32 (i32*)*)(i32* %a)
+; CHECK: %[[call:.*]] = tail call i32 @fn2(ptr %a)
; CHECK-NEXT: ret i32 %[[call]]
entry:
- %call = tail call i32 bitcast (i32 (i16)* @fn2 to i32 (i32*)*)(i32* %a)
+ %call = tail call i32 @fn2(ptr %a)
ret i32 %call
}
declare i32 @fn3(i64)
-define i32 @test3(i32* %a) {
+define i32 @test3(ptr %a) {
; CHECK-LABEL: @test3
-; CHECK: %[[call:.*]] = tail call i32 bitcast (i32 (i64)* @fn3 to i32 (i32*)*)(i32* %a)
+; CHECK: %[[call:.*]] = tail call i32 @fn3(ptr %a)
; CHECK-NEXT: ret i32 %[[call]]
entry:
- %call = tail call i32 bitcast (i32 (i64)* @fn3 to i32 (i32*)*)(i32* %a)
+ %call = tail call i32 @fn3(ptr %a)
ret i32 %call
}
declare i32 @fn4(i32) "thunk"
-define i32 @test4(i32* %a) {
+define i32 @test4(ptr %a) {
; CHECK-LABEL: @test4
-; CHECK: %[[call:.*]] = tail call i32 bitcast (i32 (i32)* @fn4 to i32 (i32*)*)(i32* %a)
+; CHECK: %[[call:.*]] = tail call i32 @fn4(ptr %a)
; CHECK-NEXT: ret i32 %[[call]]
entry:
- %call = tail call i32 bitcast (i32 (i32)* @fn4 to i32 (i32*)*)(i32* %a)
+ %call = tail call i32 @fn4(ptr %a)
ret i32 %call
}
-declare i1 @fn5({ i32, i32 }* byval({ i32, i32 }) align 4 %r)
+declare i1 @fn5(ptr byval({ i32, i32 }) align 4 %r)
define i1 @test5() {
; CHECK-LABEL: @test5
-; CHECK: %[[call:.*]] = call i1 bitcast (i1 ({ i32, i32 }*)* @fn5 to i1 (i32, i32)*)(i32 {{.*}}, i32 {{.*}})
+; CHECK: %[[call:.*]] = call i1 @fn5(i32 {{.*}}, i32 {{.*}})
; CHECK-NEXT: ret i1 %[[call]]
%1 = alloca { i32, i32 }, align 4
- %2 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %1, i32 0, i32 0
- %3 = load i32, i32* %2, align 4
- %4 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %1, i32 0, i32 1
- %5 = load i32, i32* %4, align 4
- %6 = call i1 bitcast (i1 ({ i32, i32 }*)* @fn5 to i1 (i32, i32)*)(i32 %3, i32 %5)
+ %2 = getelementptr inbounds { i32, i32 }, ptr %1, i32 0, i32 0
+ %3 = load i32, ptr %2, align 4
+ %4 = getelementptr inbounds { i32, i32 }, ptr %1, i32 0, i32 1
+ %5 = load i32, ptr %4, align 4
+ %6 = call i1 @fn5(i32 %3, i32 %5)
ret i1 %6
}
; Might not be legal to hoist the load above the first guard since the
; guard might control dereferenceability
-define void @negative_load(i32 %V1, i32* %P) {
+define void @negative_load(i32 %V1, ptr %P) {
; CHECK-LABEL: @negative_load(
; CHECK-NEXT: [[A:%.*]] = icmp slt i32 [[V1:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[A]], i32 123) [ "deopt"() ]
-; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[P:%.*]], align 4
; CHECK-NEXT: [[B:%.*]] = icmp slt i32 [[V2]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[B]], i32 456) [ "deopt"() ]
; CHECK-NEXT: ret void
;
%A = icmp slt i32 %V1, 0
call void(i1, ...) @llvm.experimental.guard( i1 %A, i32 123 )[ "deopt"() ]
- %V2 = load i32, i32* %P
+ %V2 = load i32, ptr %P
%B = icmp slt i32 %V2, 0
call void(i1, ...) @llvm.experimental.guard( i1 %B, i32 456 )[ "deopt"() ]
ret void
}
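; In contrast to @negative_load, %P is dereferenceable(4) and the function
; is nofree/nosync, so the load can be hoisted above the first guard and
; the two guard conditions merged into a single check.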
-define void @deref_load(i32 %V1, i32* dereferenceable(4) align 4 %P) nofree nosync {
+define void @deref_load(i32 %V1, ptr dereferenceable(4) align 4 %P) nofree nosync {
; CHECK-LABEL: @deref_load(
-; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[V2]], [[V1:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 123) [ "deopt"() ]
;
%A = icmp slt i32 %V1, 0
call void(i1, ...) @llvm.experimental.guard( i1 %A, i32 123 )[ "deopt"() ]
- %V2 = load i32, i32* %P
+ %V2 = load i32, ptr %P
%B = icmp slt i32 %V2, 0
call void(i1, ...) @llvm.experimental.guard( i1 %B, i32 456 )[ "deopt"() ]
ret void
}
@X = global i8 0
@Y = global i8 12
-declare void @llvm.memmove.p0i8.p0i8.i32(i8*, i8*, i32, i1)
+declare void @llvm.memmove.p0.p0.i32(ptr, ptr, i32, i1)
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i1)
+declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1)
-declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i1)
+declare void @llvm.memset.p0.i32(ptr, i8, i32, i1)
define void @zero_byte_test() {
; These process zero bytes, so they are a noop.
- call void @llvm.memmove.p0i8.p0i8.i32(i8* @X, i8* @Y, i32 0, i1 false )
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* @X, i8* @Y, i32 0, i1 false )
- call void @llvm.memset.p0i8.i32(i8* @X, i8 123, i32 0, i1 false )
+ call void @llvm.memmove.p0.p0.i32(ptr @X, ptr @Y, i32 0, i1 false )
+ call void @llvm.memcpy.p0.p0.i32(ptr @X, ptr @Y, i32 0, i1 false )
+ call void @llvm.memset.p0.i32(ptr @X, i8 123, i32 0, i1 false )
ret void
}
; This used to crash trying to do a double-to-pointer conversion
define i32 @bar() {
entry:
- %retval = alloca i32, align 4 ; <i32*> [#uses=1]
- %tmp = call i32 (...) bitcast (i32 (i8*)* @f to i32 (...)*)( double 3.000000e+00 ) ; <i32> [#uses=0]
+ %retval = alloca i32, align 4 ; <ptr> [#uses=1]
+ %tmp = call i32 (...) @f( double 3.000000e+00 ) ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
- %retval1 = load i32, i32* %retval ; <i32> [#uses=1]
+ %retval1 = load i32, ptr %retval ; <i32> [#uses=1]
ret i32 %retval1
}
-define i32 @f(i8* %p) {
+define i32 @f(ptr %p) {
entry:
- %p_addr = alloca i8* ; <i8**> [#uses=1]
- %retval = alloca i32, align 4 ; <i32*> [#uses=1]
- store i8* %p, i8** %p_addr
+ %p_addr = alloca ptr ; <ptr> [#uses=1]
+ %retval = alloca i32, align 4 ; <ptr> [#uses=1]
+ store ptr %p, ptr %p_addr
br label %return
return: ; preds = %entry
- %retval1 = load i32, i32* %retval ; <i32> [#uses=1]
+ %retval1 = load i32, ptr %retval ; <i32> [#uses=1]
ret i32 %retval1
}
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
; InstCombine should mark null-checked argument as nonnull at callsite
-declare void @dummy(i32*, i32)
+declare void @dummy(ptr, i32)
-define void @test(i32* %a, i32 %b) {
+define void @test(ptr %a, i32 %b) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[COND1:%.*]] = icmp eq i32* [[A:%.*]], null
+; CHECK-NEXT: [[COND1:%.*]] = icmp eq ptr [[A:%.*]], null
; CHECK-NEXT: br i1 [[COND1]], label [[DEAD:%.*]], label [[NOT_NULL:%.*]]
; CHECK: not_null:
; CHECK-NEXT: [[COND2:%.*]] = icmp eq i32 [[B:%.*]], 0
; CHECK-NEXT: br i1 [[COND2]], label [[DEAD]], label [[NOT_ZERO:%.*]]
; CHECK: not_zero:
-; CHECK-NEXT: call void @dummy(i32* nonnull [[A]], i32 [[B]])
+; CHECK-NEXT: call void @dummy(ptr nonnull [[A]], i32 [[B]])
; CHECK-NEXT: ret void
; CHECK: dead:
; CHECK-NEXT: unreachable
;
entry:
- %cond1 = icmp eq i32* %a, null
+ %cond1 = icmp eq ptr %a, null
br i1 %cond1, label %dead, label %not_null
not_null:
%cond2 = icmp eq i32 %b, 0
br i1 %cond2, label %dead, label %not_zero
not_zero:
- call void @dummy(i32* %a, i32 %b)
+ call void @dummy(ptr %a, i32 %b)
ret void
dead:
unreachable
}
; The nonnull attribute in the 'bar' declaration is
; propagated to the parameters of the 'baz' callsite.
-declare void @bar(i8*, i8* nonnull noundef)
-declare void @bar_without_noundef(i8*, i8* nonnull)
-declare void @baz(i8*, i8*)
+declare void @bar(ptr, ptr nonnull noundef)
+declare void @bar_without_noundef(ptr, ptr nonnull)
+declare void @baz(ptr, ptr)
-define void @deduce_nonnull_from_another_call(i8* %a, i8* %b) {
+define void @deduce_nonnull_from_another_call(ptr %a, ptr %b) {
; CHECK-LABEL: @deduce_nonnull_from_another_call(
-; CHECK-NEXT: call void @bar(i8* [[A:%.*]], i8* [[B:%.*]])
-; CHECK-NEXT: call void @baz(i8* nonnull [[B]], i8* nonnull [[B]])
+; CHECK-NEXT: call void @bar(ptr [[A:%.*]], ptr [[B:%.*]])
+; CHECK-NEXT: call void @baz(ptr nonnull [[B]], ptr nonnull [[B]])
; CHECK-NEXT: ret void
;
- call void @bar(i8* %a, i8* %b)
- call void @baz(i8* %b, i8* %b)
+ call void @bar(ptr %a, ptr %b)
+ call void @baz(ptr %b, ptr %b)
ret void
}
-define void @deduce_nonnull_from_another_call2(i8* %a, i8* %b) {
+define void @deduce_nonnull_from_another_call2(ptr %a, ptr %b) {
; CHECK-LABEL: @deduce_nonnull_from_another_call2(
-; CHECK-NEXT: call void @bar_without_noundef(i8* [[A:%.*]], i8* [[B:%.*]])
-; CHECK-NEXT: call void @baz(i8* [[B]], i8* [[B]])
+; CHECK-NEXT: call void @bar_without_noundef(ptr [[A:%.*]], ptr [[B:%.*]])
+; CHECK-NEXT: call void @baz(ptr [[B]], ptr [[B]])
; CHECK-NEXT: ret void
;
- call void @bar_without_noundef(i8* %a, i8* %b)
- call void @baz(i8* %b, i8* %b)
+ call void @bar_without_noundef(ptr %a, ptr %b)
+ call void @baz(ptr %b, ptr %b)
ret void
}
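; Without noundef on the nonnull parameter, passing poison/undef to
; @bar_without_noundef would not be immediate UB, so nonnull cannot be
; soundly propagated to the @baz arguments in the variant above.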
; The argument types should match if it is the standard library calloc.
; Don't crash analyzing an imposter.
-declare i8* @calloc(i64, i32)
+declare ptr @calloc(i64, i32)
define void @PR50846() {
; CHECK-LABEL: @PR50846(
-; CHECK-NEXT: [[CALL:%.*]] = call i8* @calloc(i64 1, i32 1)
+; CHECK-NEXT: [[CALL:%.*]] = call ptr @calloc(i64 1, i32 1)
; CHECK-NEXT: ret void
;
- %call = call i8* @calloc(i64 1, i32 1)
+ %call = call ptr @calloc(i64 1, i32 1)
ret void
}
;-------------------------------------------------------------------------------
-define i32* @t22_pointers(i32* %x, i32* %replacement_low, i32* %replacement_high) {
+define ptr @t22_pointers(ptr %x, ptr %replacement_low, ptr %replacement_high) {
; CHECK-LABEL: @t22_pointers(
-; CHECK-NEXT: [[T0:%.*]] = icmp slt i32* [[X:%.*]], inttoptr (i64 65536 to i32*)
-; CHECK-NEXT: [[T1:%.*]] = select i1 [[T0]], i32* [[REPLACEMENT_LOW:%.*]], i32* [[REPLACEMENT_HIGH:%.*]]
-; CHECK-NEXT: [[T2:%.*]] = icmp ult i32* [[X]], inttoptr (i64 65536 to i32*)
-; CHECK-NEXT: [[R:%.*]] = select i1 [[T2]], i32* [[X]], i32* [[T1]]
-; CHECK-NEXT: ret i32* [[R]]
+; CHECK-NEXT: [[T0:%.*]] = icmp slt ptr [[X:%.*]], inttoptr (i64 65536 to ptr)
+; CHECK-NEXT: [[T1:%.*]] = select i1 [[T0]], ptr [[REPLACEMENT_LOW:%.*]], ptr [[REPLACEMENT_HIGH:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = icmp ult ptr [[X]], inttoptr (i64 65536 to ptr)
+; CHECK-NEXT: [[R:%.*]] = select i1 [[T2]], ptr [[X]], ptr [[T1]]
+; CHECK-NEXT: ret ptr [[R]]
;
- %t0 = icmp slt i32* %x, inttoptr (i64 65536 to i32*)
- %t1 = select i1 %t0, i32* %replacement_low, i32* %replacement_high
- %t2 = icmp ult i32* %x, inttoptr (i64 65536 to i32*)
- %r = select i1 %t2, i32* %x, i32* %t1
- ret i32* %r
+ %t0 = icmp slt ptr %x, inttoptr (i64 65536 to ptr)
+ %t1 = select i1 %t0, ptr %replacement_low, ptr %replacement_high
+ %t2 = icmp ult ptr %x, inttoptr (i64 65536 to ptr)
+ %r = select i1 %t2, ptr %x, ptr %t1
+ ret ptr %r
}
}
; Extra use can be adjusted. While there, test multi-bb case.
-define i8 @t3(i8 %x, i8 %v0, i8 %v1, i8 %v2, i8 %v3, i8* %out, i1 %c) {
+define i8 @t3(i8 %x, i8 %v0, i8 %v1, i8 %v2, i8 %v3, ptr %out, i1 %c) {
; CHECK-LABEL: @t3(
; CHECK-NEXT: bb0:
; CHECK-NEXT: [[T0:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: [[R0:%.*]] = select i1 [[T1_NOT]], i8 [[V1:%.*]], i8 [[V0:%.*]]
-; CHECK-NEXT: store i8 [[R0]], i8* [[OUT:%.*]], align 1
+; CHECK-NEXT: store i8 [[R0]], ptr [[OUT:%.*]], align 1
; CHECK-NEXT: br label [[BB2]]
; CHECK: bb2:
; CHECK-NEXT: [[R1:%.*]] = select i1 [[T1_NOT]], i8 [[V3:%.*]], i8 [[V2:%.*]]
; CHECK-NEXT: ret i8 [[R1]]
;
bb0:
%t0 = and i8 %x, 1
%t1 = icmp ne i8 %t0, 0
br i1 %c, label %bb1, label %bb2
bb1:
%r0 = select i1 %t1, i8 %v0, i8 %v1
- store i8 %r0, i8* %out
+ store i8 %r0, ptr %out
br label %bb2
bb2:
%r1 = select i1 %t1, i8 %v2, i8 %v3
ret i8 %r1
}
-define i8 @t4(i8 %x, i8 %v0, i8 %v1, i8 %v2, i8 %v3, i8* %out) {
+define i8 @t4(i8 %x, i8 %v0, i8 %v1, i8 %v2, i8 %v3, ptr %out) {
; CHECK-LABEL: @t4(
; CHECK-NEXT: [[T0:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT: [[T1_NOT:%.*]] = icmp eq i8 [[T0]], 0
; CHECK-NEXT: [[R0:%.*]] = select i1 [[T1_NOT]], i8 [[V1:%.*]], i8 [[V0:%.*]]
-; CHECK-NEXT: store i8 [[R0]], i8* [[OUT:%.*]], align 1
+; CHECK-NEXT: store i8 [[R0]], ptr [[OUT:%.*]], align 1
; CHECK-NEXT: [[R1:%.*]] = select i1 [[T1_NOT]], i8 [[V3:%.*]], i8 [[V2:%.*]]
; CHECK-NEXT: ret i8 [[R1]]
;
%t0 = and i8 %x, 1
%t1 = icmp ne i8 %t0, 0
%r0 = select i1 %t1, i8 %v0, i8 %v1
- store i8 %r0, i8* %out
+ store i8 %r0, ptr %out
%r1 = select i1 %t1, i8 %v2, i8 %v3
ret i8 %r1
}
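; A note on the two tests above: the 'icmp ne' is canonicalized to an
; inverted 'icmp eq' (T1_NOT in the CHECK lines), and every select fed by
; it -- including the extra use that feeds the store -- swaps its arms to
; compensate, which is what lets the usual one-use restriction be relaxed.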
; Check that !prof metadata is preserved when removing function
; prototype casts.
declare i32 @__gxx_personality_v0(...)
-declare void @__cxa_call_unexpected(i8*)
-declare void @foo(i16* %a)
+declare void @__cxa_call_unexpected(ptr)
+declare void @foo(ptr %a)
; CHECK-LABEL: @test_call()
-; CHECK: call void @foo(i16* null), !prof ![[$PROF:[0-9]+]]
+; CHECK: call void @foo(ptr null), !prof ![[$PROF:[0-9]+]]
define void @test_call() {
- call void bitcast (void (i16*)* @foo to void (i8*)*) (i8* null), !prof !0
+ call void @foo (ptr null), !prof !0
ret void
}
; CHECK-LABEL: @test_invoke()
-; CHECK: invoke void @foo(i16* null)
+; CHECK: invoke void @foo(ptr null)
; CHECK-NEXT: to label %done unwind label %lpad, !prof ![[$PROF]]
-define void @test_invoke() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
- invoke void bitcast (void (i16*)* @foo to void (i8*)*) (i8* null)
+define void @test_invoke() personality ptr @__gxx_personality_v0 {
+ invoke void @foo (ptr null)
to label %done unwind label %lpad, !prof !0
done:
ret void
lpad:
- %lp = landingpad { i8*, i32 }
- filter [0 x i8*] zeroinitializer
- %ehptr = extractvalue { i8*, i32 } %lp, 0
- tail call void @__cxa_call_unexpected(i8* %ehptr) noreturn nounwind
+ %lp = landingpad { ptr, i32 }
+ filter [0 x ptr] zeroinitializer
+ %ehptr = extractvalue { ptr, i32 } %lp, 0
+ tail call void @__cxa_call_unexpected(ptr %ehptr) noreturn nounwind
unreachable
}
; RUN: opt < %s -passes=instcombine,always-inline -S | FileCheck %s
-define internal void @foo(i16*) alwaysinline {
+define internal void @foo(ptr) alwaysinline {
ret void
}
else:
; CHECK-NOT: call
- call void bitcast (void (i16*)* @foo to void (i8*)*) (i8* null)
+ call void @foo (ptr null)
ret void
}
; CHECK-LABEL: @g(
entry:
; CHECK: call void @foo(i32 0) [ "deopt"() ]
- call void bitcast (void (i32)* @foo to void ()*) () [ "deopt"() ]
+ call void @foo () [ "deopt"() ]
ret void
}
ret i1 %C
}
-define i1 @test7(i8* %A) {
+define i1 @test7(ptr %A) {
; CHECK-LABEL: @test7(
-; CHECK-NEXT: [[C:%.*]] = icmp eq i8* %A, null
+; CHECK-NEXT: [[C:%.*]] = icmp eq ptr %A, null
; CHECK-NEXT: ret i1 [[C]]
;
- %B = bitcast i8* %A to i32*
- %C = icmp eq i32* %B, null
+ %C = icmp eq ptr %A, null
ret i1 %C
}
; CHECK-NEXT: [[CALLB:%.*]] = alloca [258 x float], align 4
; CHECK-NEXT: [[CONV_I:%.*]] = uitofp i32 [[INUMSTEPS:%.*]] to float
; CHECK-NEXT: [[CONV_I12:%.*]] = zext i32 [[TID:%.*]] to i64
-; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [258 x float], [258 x float]* [[CALLA]], i64 0, i64 [[CONV_I12]]
-; CHECK-NEXT: store float [[CONV_I]], float* [[ARRAYIDX3]], align 4
-; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [258 x float], [258 x float]* [[CALLB]], i64 0, i64 [[CONV_I12]]
-; CHECK-NEXT: store float [[CONV_I]], float* [[ARRAYIDX6]], align 4
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [258 x float], ptr [[CALLA]], i64 0, i64 [[CONV_I12]]
+; CHECK-NEXT: store float [[CONV_I]], ptr [[ARRAYIDX3]], align 4
+; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [258 x float], ptr [[CALLB]], i64 0, i64 [[CONV_I12]]
+; CHECK-NEXT: store float [[CONV_I]], ptr [[ARRAYIDX6]], align 4
; CHECK-NEXT: [[CMP7:%.*]] = icmp eq i32 [[TID]], 0
; CHECK-NEXT: br i1 [[CMP7]], label [[DOTBB1:%.*]], label [[DOTBB2:%.*]]
; CHECK: .bb1:
-; CHECK-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds [258 x float], [258 x float]* [[CALLA]], i64 0, i64 256
-; CHECK-NEXT: store float [[CONV_I]], float* [[ARRAYIDX10]], align 4
-; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [258 x float], [258 x float]* [[CALLB]], i64 0, i64 256
-; CHECK-NEXT: store float 0.000000e+00, float* [[ARRAYIDX11]], align 4
+; CHECK-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds [258 x float], ptr [[CALLA]], i64 0, i64 256
+; CHECK-NEXT: store float [[CONV_I]], ptr [[ARRAYIDX10]], align 4
+; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [258 x float], ptr [[CALLB]], i64 0, i64 256
+; CHECK-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX11]], align 4
; CHECK-NEXT: br label [[DOTBB2]]
; CHECK: .bb2:
; CHECK-NEXT: [[CMP135:%.*]] = icmp sgt i32 [[INUMSTEPS]], 0
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[I12_06]], [[BASE:%.*]]
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[I12_06]], 1
; CHECK-NEXT: [[CONV_I9:%.*]] = sext i32 [[ADD]] to i64
-; CHECK-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds [258 x float], [258 x float]* [[CALLA]], i64 0, i64 [[CONV_I9]]
-; CHECK-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds [258 x float], [258 x float]* [[CALLB]], i64 0, i64 [[CONV_I9]]
+; CHECK-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds [258 x float], ptr [[CALLA]], i64 0, i64 [[CONV_I9]]
+; CHECK-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds [258 x float], ptr [[CALLB]], i64 0, i64 [[CONV_I9]]
; CHECK-NEXT: [[CMP40:%.*]] = icmp ult i32 [[I12_06]], [[BASE]]
; CHECK-NEXT: br i1 [[TMP3]], label [[DOTBB4:%.*]], label [[DOTBB5:%.*]]
; CHECK: .bb4:
-; CHECK-NEXT: [[TMP4:%.*]] = load float, float* [[ARRAYIDX20]], align 4
-; CHECK-NEXT: [[TMP5:%.*]] = load float, float* [[ARRAYIDX24]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX20]], align 4
+; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX24]], align 4
; CHECK-NEXT: [[ADD33:%.*]] = fadd float [[TMP5]], [[TMP4]]
; CHECK-NEXT: [[ADD33_1:%.*]] = fadd float [[ADD33]], [[TMP1]]
; CHECK-NEXT: [[ADD33_2:%.*]] = fadd float [[ADD33_1]], [[TMP2]]
; CHECK-NEXT: [[TMP7:%.*]] = phi float [ [[ADD33_2]], [[DOTBB4]] ], [ [[TMP2]], [[DOTBB3]] ]
; CHECK-NEXT: br i1 [[CMP40]], label [[DOTBB6:%.*]], label [[DOTBB7:%.*]]
; CHECK: .bb6:
-; CHECK-NEXT: store float [[TMP7]], float* [[ARRAYIDX3]], align 4
-; CHECK-NEXT: store float [[TMP6]], float* [[ARRAYIDX6]], align 4
+; CHECK-NEXT: store float [[TMP7]], ptr [[ARRAYIDX3]], align 4
+; CHECK-NEXT: store float [[TMP6]], ptr [[ARRAYIDX6]], align 4
; CHECK-NEXT: br label [[DOTBB7]]
; CHECK: .bb7:
; CHECK-NEXT: br i1 [[TMP3]], label [[DOTBB9:%.*]], label [[DOTBB10:%.*]]
; CHECK: .bb8:
; CHECK-NEXT: ret void
; CHECK: .bb9:
-; CHECK-NEXT: [[TMP8:%.*]] = load float, float* [[ARRAYIDX20]], align 4
-; CHECK-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX24]], align 4
+; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX20]], align 4
+; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX24]], align 4
; CHECK-NEXT: [[ADD33_112:%.*]] = fadd float [[TMP9]], [[TMP8]]
; CHECK-NEXT: [[ADD33_1_1:%.*]] = fadd float [[ADD33_112]], [[TMP6]]
; CHECK-NEXT: [[ADD33_2_1:%.*]] = fadd float [[ADD33_1_1]], [[TMP7]]
; CHECK-NEXT: [[TMP11]] = phi float [ [[ADD33_2_1]], [[DOTBB9]] ], [ [[TMP7]], [[DOTBB7]] ]
; CHECK-NEXT: br i1 [[CMP40]], label [[DOTBB11:%.*]], label [[DOTBB12]]
; CHECK: .bb11:
-; CHECK-NEXT: store float [[TMP11]], float* [[ARRAYIDX3]], align 4
-; CHECK-NEXT: store float [[TMP10]], float* [[ARRAYIDX6]], align 4
+; CHECK-NEXT: store float [[TMP11]], ptr [[ARRAYIDX3]], align 4
+; CHECK-NEXT: store float [[TMP10]], ptr [[ARRAYIDX6]], align 4
; CHECK-NEXT: br label [[DOTBB12]]
; CHECK: .bb12:
; CHECK-NEXT: [[SUB]] = add i32 [[I12_06]], -4
%conv.i = uitofp i32 %iNumSteps to float
%1 = bitcast float %conv.i to i32
%conv.i12 = zext i32 %tid to i64
- %arrayidx3 = getelementptr inbounds [258 x float], [258 x float]* %callA, i64 0, i64 %conv.i12
- %2 = bitcast float* %arrayidx3 to i32*
- store i32 %1, i32* %2, align 4
- %arrayidx6 = getelementptr inbounds [258 x float], [258 x float]* %callB, i64 0, i64 %conv.i12
- %3 = bitcast float* %arrayidx6 to i32*
- store i32 %1, i32* %3, align 4
+ %arrayidx3 = getelementptr inbounds [258 x float], ptr %callA, i64 0, i64 %conv.i12
+ store i32 %1, ptr %arrayidx3, align 4
+ %arrayidx6 = getelementptr inbounds [258 x float], ptr %callB, i64 0, i64 %conv.i12
+ store i32 %1, ptr %arrayidx6, align 4
%cmp7 = icmp eq i32 %tid, 0
br i1 %cmp7, label %.bb1, label %.bb2
.bb1:
- %arrayidx10 = getelementptr inbounds [258 x float], [258 x float]* %callA, i64 0, i64 256
- store float %conv.i, float* %arrayidx10, align 4
- %arrayidx11 = getelementptr inbounds [258 x float], [258 x float]* %callB, i64 0, i64 256
- store float 0.000000e+00, float* %arrayidx11, align 4
+ %arrayidx10 = getelementptr inbounds [258 x float], ptr %callA, i64 0, i64 256
+ store float %conv.i, ptr %arrayidx10, align 4
+ %arrayidx11 = getelementptr inbounds [258 x float], ptr %callB, i64 0, i64 256
+ store float 0.000000e+00, ptr %arrayidx11, align 4
br label %.bb2
.bb2:
%rA.sroa.8.0 = phi i32 [ %rA.sroa.8.2, %.bb12 ], [ %1, %.bb2 ]
%rA.sroa.0.0 = phi i32 [ %rA.sroa.0.2, %.bb12 ], [ %1, %.bb2 ]
%i12.06 = phi i32 [ %sub, %.bb12 ], [ %iNumSteps, %.bb2 ]
- %4 = icmp ugt i32 %i12.06, %base
+ %2 = icmp ugt i32 %i12.06, %base
%add = add i32 %i12.06, 1
%conv.i9 = sext i32 %add to i64
- %arrayidx20 = getelementptr inbounds [258 x float], [258 x float]* %callA, i64 0, i64 %conv.i9
- %5 = bitcast float* %arrayidx20 to i32*
- %arrayidx24 = getelementptr inbounds [258 x float], [258 x float]* %callB, i64 0, i64 %conv.i9
- %6 = bitcast float* %arrayidx24 to i32*
+ %arrayidx20 = getelementptr inbounds [258 x float], ptr %callA, i64 0, i64 %conv.i9
+ %arrayidx24 = getelementptr inbounds [258 x float], ptr %callB, i64 0, i64 %conv.i9
%cmp40 = icmp ult i32 %i12.06, %base
- br i1 %4, label %.bb4, label %.bb5
+ br i1 %2, label %.bb4, label %.bb5
.bb4:
- %7 = load i32, i32* %5, align 4
- %8 = load i32, i32* %6, align 4
- %9 = bitcast i32 %8 to float
- %10 = bitcast i32 %7 to float
- %add33 = fadd float %9, %10
- %11 = bitcast i32 %rA.sroa.8.0 to float
- %add33.1 = fadd float %add33, %11
- %12 = bitcast float %add33.1 to i32
- %13 = bitcast i32 %rA.sroa.0.0 to float
- %add33.2 = fadd float %add33.1, %13
- %14 = bitcast float %add33.2 to i32
+ %3 = load i32, ptr %arrayidx20, align 4
+ %4 = load i32, ptr %arrayidx24, align 4
+ %5 = bitcast i32 %4 to float
+ %6 = bitcast i32 %3 to float
+ %add33 = fadd float %5, %6
+ %7 = bitcast i32 %rA.sroa.8.0 to float
+ %add33.1 = fadd float %add33, %7
+ %8 = bitcast float %add33.1 to i32
+ %9 = bitcast i32 %rA.sroa.0.0 to float
+ %add33.2 = fadd float %add33.1, %9
+ %10 = bitcast float %add33.2 to i32
br label %.bb5
.bb5:
- %rA.sroa.8.1 = phi i32 [ %12, %.bb4 ], [ %rA.sroa.8.0, %.bb3 ]
- %rA.sroa.0.1 = phi i32 [ %14, %.bb4 ], [ %rA.sroa.0.0, %.bb3 ]
+ %rA.sroa.8.1 = phi i32 [ %8, %.bb4 ], [ %rA.sroa.8.0, %.bb3 ]
+ %rA.sroa.0.1 = phi i32 [ %10, %.bb4 ], [ %rA.sroa.0.0, %.bb3 ]
br i1 %cmp40, label %.bb6, label %.bb7
.bb6:
- store i32 %rA.sroa.0.1, i32* %2, align 4
- store i32 %rA.sroa.8.1, i32* %3, align 4
+ store i32 %rA.sroa.0.1, ptr %arrayidx3, align 4
+ store i32 %rA.sroa.8.1, ptr %arrayidx6, align 4
br label %.bb7
.bb7:
- br i1 %4, label %.bb9, label %.bb10
+ br i1 %2, label %.bb9, label %.bb10
.bb8:
ret void
.bb9:
- %15 = load i32, i32* %5, align 4
- %16 = load i32, i32* %6, align 4
- %17 = bitcast i32 %16 to float
- %18 = bitcast i32 %15 to float
- %add33.112 = fadd float %17, %18
- %19 = bitcast i32 %rA.sroa.8.1 to float
- %add33.1.1 = fadd float %add33.112, %19
- %20 = bitcast float %add33.1.1 to i32
- %21 = bitcast i32 %rA.sroa.0.1 to float
- %add33.2.1 = fadd float %add33.1.1, %21
- %22 = bitcast float %add33.2.1 to i32
+ %11 = load i32, ptr %arrayidx20, align 4
+ %12 = load i32, ptr %arrayidx24, align 4
+ %13 = bitcast i32 %12 to float
+ %14 = bitcast i32 %11 to float
+ %add33.112 = fadd float %13, %14
+ %15 = bitcast i32 %rA.sroa.8.1 to float
+ %add33.1.1 = fadd float %add33.112, %15
+ %16 = bitcast float %add33.1.1 to i32
+ %17 = bitcast i32 %rA.sroa.0.1 to float
+ %add33.2.1 = fadd float %add33.1.1, %17
+ %18 = bitcast float %add33.2.1 to i32
br label %.bb10
.bb10:
- %rA.sroa.8.2 = phi i32 [ %20, %.bb9 ], [ %rA.sroa.8.1, %.bb7 ]
- %rA.sroa.0.2 = phi i32 [ %22, %.bb9 ], [ %rA.sroa.0.1, %.bb7 ]
+ %rA.sroa.8.2 = phi i32 [ %16, %.bb9 ], [ %rA.sroa.8.1, %.bb7 ]
+ %rA.sroa.0.2 = phi i32 [ %18, %.bb9 ], [ %rA.sroa.0.1, %.bb7 ]
br i1 %cmp40, label %.bb11, label %.bb12
.bb11:
- store i32 %rA.sroa.0.2, i32* %2, align 4
- store i32 %rA.sroa.8.2, i32* %3, align 4
+ store i32 %rA.sroa.0.2, ptr %arrayidx3, align 4
+ store i32 %rA.sroa.8.2, ptr %arrayidx6, align 4
br label %.bb12
.bb12:
; This is not folded to a getelementptr because the relationship
; between the arithmetic and the layout of allocated memory is
; entirely unknown.
-define i8* @test1(i8* %t) {
+define ptr @test1(ptr %t) {
; CHECK-LABEL: @test1(
-; CHECK-NEXT: [[TC:%.*]] = ptrtoint i8* [[T:%.*]] to i32
+; CHECK-NEXT: [[TC:%.*]] = ptrtoint ptr [[T:%.*]] to i32
; CHECK-NEXT: [[TA:%.*]] = add i32 [[TC]], 32
-; CHECK-NEXT: [[TV:%.*]] = inttoptr i32 [[TA]] to i8*
-; CHECK-NEXT: ret i8* [[TV]]
+; CHECK-NEXT: [[TV:%.*]] = inttoptr i32 [[TA]] to ptr
+; CHECK-NEXT: ret ptr [[TV]]
;
- %tc = ptrtoint i8* %t to i32
+ %tc = ptrtoint ptr %t to i32
%ta = add i32 %tc, 32
- %tv = inttoptr i32 %ta to i8*
- ret i8* %tv
+ %tv = inttoptr i32 %ta to ptr
+ ret ptr %tv
}
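; For comparison, applying the offset as a gep in the first place carries
; no such ambiguity; a minimal sketch (hypothetical function, not part of
; this test):
;   define ptr @test1_gep_form(ptr %t) {
;     %tv = getelementptr i8, ptr %t, i32 32
;     ret ptr %tv
;   }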
; These casts should be folded away.
-define i1 @test2(i8* %a, i8* %b) {
+define i1 @test2(ptr %a, ptr %b) {
; CHECK-LABEL: @test2(
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8* [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq ptr [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
- %ta = ptrtoint i8* %a to i32
- %tb = ptrtoint i8* %b to i32
+ %ta = ptrtoint ptr %a to i32
+ %tb = ptrtoint ptr %b to i32
%r = icmp eq i32 %ta, %tb
ret i1 %r
}
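; Why this folds: ptrtoint to an integer at least as wide as the pointer
; preserves equality, so comparing the casts is the same as comparing the
; pointers directly; the same-size and wider-int address-space variants
; below fold for the same reason.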
; These casts should be folded away.
-define i1 @test2_as2_same_int(i8 addrspace(2)* %a, i8 addrspace(2)* %b) {
+define i1 @test2_as2_same_int(ptr addrspace(2) %a, ptr addrspace(2) %b) {
; CHECK-LABEL: @test2_as2_same_int(
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8 addrspace(2)* [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq ptr addrspace(2) [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
- %ta = ptrtoint i8 addrspace(2)* %a to i16
- %tb = ptrtoint i8 addrspace(2)* %b to i16
+ %ta = ptrtoint ptr addrspace(2) %a to i16
+ %tb = ptrtoint ptr addrspace(2) %b to i16
%r = icmp eq i16 %ta, %tb
ret i1 %r
}
; These casts should be folded away.
-define i1 @test2_as2_larger(i8 addrspace(2)* %a, i8 addrspace(2)* %b) {
+define i1 @test2_as2_larger(ptr addrspace(2) %a, ptr addrspace(2) %b) {
; CHECK-LABEL: @test2_as2_larger(
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8 addrspace(2)* [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq ptr addrspace(2) [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
- %ta = ptrtoint i8 addrspace(2)* %a to i32
- %tb = ptrtoint i8 addrspace(2)* %b to i32
+ %ta = ptrtoint ptr addrspace(2) %a to i32
+ %tb = ptrtoint ptr addrspace(2) %b to i32
%r = icmp eq i32 %ta, %tb
ret i1 %r
}
; These casts should not be folded away.
-define i1 @test2_diff_as(i8* %p, i8 addrspace(1)* %q) {
+define i1 @test2_diff_as(ptr %p, ptr addrspace(1) %q) {
; CHECK-LABEL: @test2_diff_as(
-; CHECK-NEXT: [[I0:%.*]] = ptrtoint i8* [[P:%.*]] to i32
-; CHECK-NEXT: [[I1:%.*]] = ptrtoint i8 addrspace(1)* [[Q:%.*]] to i32
+; CHECK-NEXT: [[I0:%.*]] = ptrtoint ptr [[P:%.*]] to i32
+; CHECK-NEXT: [[I1:%.*]] = ptrtoint ptr addrspace(1) [[Q:%.*]] to i32
; CHECK-NEXT: [[R0:%.*]] = icmp sge i32 [[I0]], [[I1]]
; CHECK-NEXT: ret i1 [[R0]]
;
- %i0 = ptrtoint i8* %p to i32
- %i1 = ptrtoint i8 addrspace(1)* %q to i32
+ %i0 = ptrtoint ptr %p to i32
+ %i1 = ptrtoint ptr addrspace(1) %q to i32
%r0 = icmp sge i32 %i0, %i1
ret i1 %r0
}
; These casts should not be folded away.
-define i1 @test2_diff_as_global(i8 addrspace(1)* %q) {
+define i1 @test2_diff_as_global(ptr addrspace(1) %q) {
; CHECK-LABEL: @test2_diff_as_global(
-; CHECK-NEXT: [[I1:%.*]] = ptrtoint i8 addrspace(1)* [[Q:%.*]] to i32
-; CHECK-NEXT: [[R0:%.*]] = icmp sge i32 [[I1]], ptrtoint (i8* @global to i32)
+; CHECK-NEXT: [[I1:%.*]] = ptrtoint ptr addrspace(1) [[Q:%.*]] to i32
+; CHECK-NEXT: [[R0:%.*]] = icmp sge i32 [[I1]], ptrtoint (ptr @global to i32)
; CHECK-NEXT: ret i1 [[R0]]
;
- %i0 = ptrtoint i8* @global to i32
- %i1 = ptrtoint i8 addrspace(1)* %q to i32
+ %i0 = ptrtoint ptr @global to i32
+ %i1 = ptrtoint ptr addrspace(1) %q to i32
%r0 = icmp sge i32 %i1, %i0
ret i1 %r0
}
; These casts should also be folded away.
-define i1 @test3(i8* %a) {
+define i1 @test3(ptr %a) {
; CHECK-LABEL: @test3(
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8* [[A:%.*]], @global
+; CHECK-NEXT: [[R:%.*]] = icmp eq ptr [[A:%.*]], @global
; CHECK-NEXT: ret i1 [[R]]
;
- %ta = ptrtoint i8* %a to i32
- %r = icmp eq i32 %ta, ptrtoint (i8* @global to i32)
+ %ta = ptrtoint ptr %a to i32
+ %r = icmp eq i32 %ta, ptrtoint (ptr @global to i32)
ret i1 %r
}
; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[A:%.*]], 0
; CHECK-NEXT: ret i1 [[C]]
;
- %B = inttoptr i32 %A to i8*
- %C = icmp eq i8* %B, null
+ %B = inttoptr i32 %A to ptr
+ %C = icmp eq ptr %B, null
ret i1 %C
}
; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[A:%.*]], 0
; CHECK-NEXT: ret i1 [[C]]
;
- %B = inttoptr i16 %A to i8 addrspace(2)*
- %C = icmp eq i8 addrspace(2)* %B, null
+ %B = inttoptr i16 %A to ptr addrspace(2)
+ %C = icmp eq ptr addrspace(2) %B, null
ret i1 %C
}
%op = type { float }
%unop = type { i32 }
-@Array = internal constant [1 x %op* (%op*)*] [ %op* (%op*)* @foo ]
+@Array = internal constant [1 x ptr] [ ptr @foo ]
-declare %op* @foo(%op* %X)
+declare ptr @foo(ptr %X)
-define %unop* @test5(%op* %O) {
+define ptr @test5(ptr %O) {
; CHECK-LABEL: @test5(
-; CHECK-NEXT: [[T_2:%.*]] = call %op* @foo(%op* [[O:%.*]])
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast %op* [[T_2]] to %unop*
-; CHECK-NEXT: ret %unop* [[TMP1]]
+; CHECK-NEXT: [[T_2:%.*]] = call ptr @foo(ptr [[O:%.*]])
+; CHECK-NEXT: ret ptr [[T_2]]
;
- %t = load %unop* (%op*)*, %unop* (%op*)** bitcast ([1 x %op* (%op*)*]* @Array to %unop* (%op*)**); <%unop* (%op*)*> [#uses=1]
- %t.2 = call %unop* %t( %op* %O )
- ret %unop* %t.2
+ %t = load ptr, ptr @Array; <ptr> [#uses=1]
+ %t.2 = call ptr %t( ptr %O )
+ ret ptr %t.2
}
; InstCombine cannot fold 'load (cast P)' -> 'cast (load P)' if the cast
; changes the address space.
-define i8 @test6(i8 addrspace(1)* %source) {
+define i8 @test6(ptr addrspace(1) %source) {
; CHECK-LABEL: @test6(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ARRAYIDX223:%.*]] = addrspacecast i8 addrspace(1)* [[SOURCE:%.*]] to i8*
-; CHECK-NEXT: [[T4:%.*]] = load i8, i8* [[ARRAYIDX223]], align 1
+; CHECK-NEXT: [[ARRAYIDX223:%.*]] = addrspacecast ptr addrspace(1) [[SOURCE:%.*]] to ptr
+; CHECK-NEXT: [[T4:%.*]] = load i8, ptr [[ARRAYIDX223]], align 1
; CHECK-NEXT: ret i8 [[T4]]
;
entry:
- %arrayidx223 = addrspacecast i8 addrspace(1)* %source to i8*
- %t4 = load i8, i8* %arrayidx223
+ %arrayidx223 = addrspacecast ptr addrspace(1) %source to ptr
+ %t4 = load i8, ptr %arrayidx223
ret i8 %t4
}
-define <2 x i32> @insertelt(<2 x i32> %x, i32* %p, i133 %index) {
+define <2 x i32> @insertelt(<2 x i32> %x, ptr %p, i133 %index) {
; CHECK-LABEL: @insertelt(
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i32* [[P:%.*]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i32
; CHECK-NEXT: [[R:%.*]] = insertelement <2 x i32> [[X:%.*]], i32 [[TMP1]], i133 [[INDEX:%.*]]
; CHECK-NEXT: ret <2 x i32> [[R]]
;
- %v = inttoptr <2 x i32> %x to <2 x i32*>
- %i = insertelement <2 x i32*> %v, i32* %p, i133 %index
- %r = ptrtoint <2 x i32*> %i to <2 x i32>
+ %v = inttoptr <2 x i32> %x to <2 x ptr>
+ %i = insertelement <2 x ptr> %v, ptr %p, i133 %index
+ %r = ptrtoint <2 x ptr> %i to <2 x i32>
ret <2 x i32> %r
}
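; Why the round-trip above folds (assuming this file's datalayout uses
; 32-bit pointers, which the i32 <-> ptr casts suggest): inttoptr followed
; by ptrtoint at the pointer width is a no-op, so inserting %p into the
; casted vector is the same as inserting 'ptrtoint %p' into the original
; integer vector; the trunc/zext variants below differ only in how the
; element width is adjusted.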
-define <2 x i32> @insertelt_intptr_trunc(<2 x i64> %x, i32* %p) {
+define <2 x i32> @insertelt_intptr_trunc(<2 x i64> %x, ptr %p) {
; CHECK-LABEL: @insertelt_intptr_trunc(
; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i64> [[X:%.*]] to <2 x i32>
-; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint i32* [[P:%.*]] to i32
+; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P:%.*]] to i32
; CHECK-NEXT: [[R:%.*]] = insertelement <2 x i32> [[TMP1]], i32 [[TMP2]], i64 0
; CHECK-NEXT: ret <2 x i32> [[R]]
;
- %v = inttoptr <2 x i64> %x to <2 x i32*>
- %i = insertelement <2 x i32*> %v, i32* %p, i32 0
- %r = ptrtoint <2 x i32*> %i to <2 x i32>
+ %v = inttoptr <2 x i64> %x to <2 x ptr>
+ %i = insertelement <2 x ptr> %v, ptr %p, i32 0
+ %r = ptrtoint <2 x ptr> %i to <2 x i32>
ret <2 x i32> %r
}
-define <2 x i32> @insertelt_intptr_zext(<2 x i8> %x, i32* %p) {
+define <2 x i32> @insertelt_intptr_zext(<2 x i8> %x, ptr %p) {
; CHECK-LABEL: @insertelt_intptr_zext(
; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i8> [[X:%.*]] to <2 x i32>
-; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint i32* [[P:%.*]] to i32
+; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P:%.*]] to i32
; CHECK-NEXT: [[R:%.*]] = insertelement <2 x i32> [[TMP1]], i32 [[TMP2]], i64 1
; CHECK-NEXT: ret <2 x i32> [[R]]
;
- %v = inttoptr <2 x i8> %x to <2 x i32*>
- %i = insertelement <2 x i32*> %v, i32* %p, i32 1
- %r = ptrtoint <2 x i32*> %i to <2 x i32>
+ %v = inttoptr <2 x i8> %x to <2 x ptr>
+ %i = insertelement <2 x ptr> %v, ptr %p, i32 1
+ %r = ptrtoint <2 x ptr> %i to <2 x i32>
ret <2 x i32> %r
}
-define <2 x i64> @insertelt_intptr_zext_zext(<2 x i8> %x, i32* %p) {
+define <2 x i64> @insertelt_intptr_zext_zext(<2 x i8> %x, ptr %p) {
; CHECK-LABEL: @insertelt_intptr_zext_zext(
; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i8> [[X:%.*]] to <2 x i32>
-; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint i32* [[P:%.*]] to i32
+; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P:%.*]] to i32
; CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x i32> [[TMP1]], i32 [[TMP2]], i64 0
; CHECK-NEXT: [[R:%.*]] = zext <2 x i32> [[TMP3]] to <2 x i64>
; CHECK-NEXT: ret <2 x i64> [[R]]
;
- %v = inttoptr <2 x i8> %x to <2 x i32*>
- %i = insertelement <2 x i32*> %v, i32* %p, i32 0
- %r = ptrtoint <2 x i32*> %i to <2 x i64>
+ %v = inttoptr <2 x i8> %x to <2 x ptr>
+ %i = insertelement <2 x ptr> %v, ptr %p, i32 0
+ %r = ptrtoint <2 x ptr> %i to <2 x i64>
ret <2 x i64> %r
}
-declare void @use(<2 x i32*>)
+declare void @use(<2 x ptr>)
-define <2 x i32> @insertelt_extra_use1(<2 x i32> %x, i32* %p) {
+define <2 x i32> @insertelt_extra_use1(<2 x i32> %x, ptr %p) {
; CHECK-LABEL: @insertelt_extra_use1(
-; CHECK-NEXT: [[V:%.*]] = inttoptr <2 x i32> [[X:%.*]] to <2 x i32*>
-; CHECK-NEXT: call void @use(<2 x i32*> [[V]])
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i32* [[P:%.*]] to i32
+; CHECK-NEXT: [[V:%.*]] = inttoptr <2 x i32> [[X:%.*]] to <2 x ptr>
+; CHECK-NEXT: call void @use(<2 x ptr> [[V]])
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i32
; CHECK-NEXT: [[R:%.*]] = insertelement <2 x i32> [[X]], i32 [[TMP1]], i64 0
; CHECK-NEXT: ret <2 x i32> [[R]]
;
- %v = inttoptr <2 x i32> %x to <2 x i32*>
- call void @use(<2 x i32*> %v)
- %i = insertelement <2 x i32*> %v, i32* %p, i32 0
- %r = ptrtoint <2 x i32*> %i to <2 x i32>
+ %v = inttoptr <2 x i32> %x to <2 x ptr>
+ call void @use(<2 x ptr> %v)
+ %i = insertelement <2 x ptr> %v, ptr %p, i32 0
+ %r = ptrtoint <2 x ptr> %i to <2 x i32>
ret <2 x i32> %r
}
-define <2 x i32> @insertelt_extra_use2(<2 x i32> %x, i32* %p) {
+define <2 x i32> @insertelt_extra_use2(<2 x i32> %x, ptr %p) {
; CHECK-LABEL: @insertelt_extra_use2(
-; CHECK-NEXT: [[V:%.*]] = inttoptr <2 x i32> [[X:%.*]] to <2 x i32*>
-; CHECK-NEXT: [[I:%.*]] = insertelement <2 x i32*> [[V]], i32* [[P:%.*]], i64 0
-; CHECK-NEXT: call void @use(<2 x i32*> [[I]])
-; CHECK-NEXT: [[R:%.*]] = ptrtoint <2 x i32*> [[I]] to <2 x i32>
+; CHECK-NEXT: [[V:%.*]] = inttoptr <2 x i32> [[X:%.*]] to <2 x ptr>
+; CHECK-NEXT: [[I:%.*]] = insertelement <2 x ptr> [[V]], ptr [[P:%.*]], i64 0
+; CHECK-NEXT: call void @use(<2 x ptr> [[I]])
+; CHECK-NEXT: [[R:%.*]] = ptrtoint <2 x ptr> [[I]] to <2 x i32>
; CHECK-NEXT: ret <2 x i32> [[R]]
;
- %v = inttoptr <2 x i32> %x to <2 x i32*>
- %i = insertelement <2 x i32*> %v, i32* %p, i32 0
- call void @use(<2 x i32*> %i)
- %r = ptrtoint <2 x i32*> %i to <2 x i32>
+ %v = inttoptr <2 x i32> %x to <2 x ptr>
+ %i = insertelement <2 x ptr> %v, ptr %p, i32 0
+ call void @use(<2 x ptr> %i)
+ %r = ptrtoint <2 x ptr> %i to <2 x i32>
ret <2 x i32> %r
}
ret <2 x i1> %cmp
}
-define i1 @ctlz_eq_other_i32_multiuse(i32 %x, i32* %p) {
+define i1 @ctlz_eq_other_i32_multiuse(i32 %x, ptr %p) {
; CHECK-LABEL: @ctlz_eq_other_i32_multiuse(
; CHECK-NEXT: [[LZ:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[X:%.*]], i1 false), [[RNG0:!range !.*]]
-; CHECK-NEXT: store i32 [[LZ]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[LZ]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[LZ]], 24
; CHECK-NEXT: ret i1 [[CMP]]
;
%lz = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
- store i32 %lz, i32* %p
+ store i32 %lz, ptr %p
%cmp = icmp eq i32 %lz, 24
ret i1 %cmp
}
ret i1 %cmp
}
-define i1 @ctlz_ugt_other_multiuse_i32(i32 %x, i32* %p) {
+define i1 @ctlz_ugt_other_multiuse_i32(i32 %x, ptr %p) {
; CHECK-LABEL: @ctlz_ugt_other_multiuse_i32(
; CHECK-NEXT: [[LZ:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[X:%.*]], i1 false), [[RNG0]]
-; CHECK-NEXT: store i32 [[LZ]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[LZ]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[X]], 32768
; CHECK-NEXT: ret i1 [[CMP]]
;
%lz = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
- store i32 %lz, i32* %p
+ store i32 %lz, ptr %p
%cmp = icmp ugt i32 %lz, 16
ret i1 %cmp
}
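; Worked arithmetic for the fold above: on i32, 'ctlz(x) ugt 16' means
; ctlz(x) >= 17, i.e. the top 17 bits of x are zero, i.e.
; x < 2^(32-17) = 32768 -- exactly the 'icmp ult i32 %x, 32768' in the
; CHECK lines.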
ret <2 x i1> %cmp
}
-define <2 x i1> @ctlz_ult_other_multiuse_v2i32(<2 x i32> %x, <2 x i32>* %p) {
+define <2 x i1> @ctlz_ult_other_multiuse_v2i32(<2 x i32> %x, ptr %p) {
; CHECK-LABEL: @ctlz_ult_other_multiuse_v2i32(
; CHECK-NEXT: [[LZ:%.*]] = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> [[X:%.*]], i1 false)
-; CHECK-NEXT: store <2 x i32> [[LZ]], <2 x i32>* [[P:%.*]], align 8
+; CHECK-NEXT: store <2 x i32> [[LZ]], ptr [[P:%.*]], align 8
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt <2 x i32> [[X]], <i32 65535, i32 65535>
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%lz = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %x, i1 false)
- store <2 x i32> %lz, <2 x i32>* %p
+ store <2 x i32> %lz, ptr %p
%cmp = icmp ult <2 x i32> %lz, <i32 16, i32 16>
ret <2 x i1> %cmp
}
ret <2 x i1> %cmp
}
-define i1 @cttz_eq_other_i33_multiuse(i33 %x, i33* %p) {
+define i1 @cttz_eq_other_i33_multiuse(i33 %x, ptr %p) {
; CHECK-LABEL: @cttz_eq_other_i33_multiuse(
; CHECK-NEXT: [[TZ:%.*]] = tail call i33 @llvm.cttz.i33(i33 [[X:%.*]], i1 false), [[RNG1:!range !.*]]
-; CHECK-NEXT: store i33 [[TZ]], i33* [[P:%.*]], align 4
+; CHECK-NEXT: store i33 [[TZ]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i33 [[TZ]], 4
; CHECK-NEXT: ret i1 [[CMP]]
;
%tz = tail call i33 @llvm.cttz.i33(i33 %x, i1 false)
- store i33 %tz, i33* %p
+ store i33 %tz, ptr %p
%cmp = icmp eq i33 %tz, 4
ret i1 %cmp
}
ret i1 %cmp
}
-define i1 @cttz_ugt_other_multiuse_i33(i33 %x, i33* %p) {
+define i1 @cttz_ugt_other_multiuse_i33(i33 %x, ptr %p) {
; CHECK-LABEL: @cttz_ugt_other_multiuse_i33(
; CHECK-NEXT: [[TZ:%.*]] = tail call i33 @llvm.cttz.i33(i33 [[X:%.*]], i1 false), [[RNG1]]
-; CHECK-NEXT: store i33 [[TZ]], i33* [[P:%.*]], align 4
+; CHECK-NEXT: store i33 [[TZ]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i33 [[TZ]], 16
; CHECK-NEXT: ret i1 [[CMP]]
;
%tz = tail call i33 @llvm.cttz.i33(i33 %x, i1 false)
- store i33 %tz, i33* %p
+ store i33 %tz, ptr %p
%cmp = icmp ugt i33 %tz, 16
ret i1 %cmp
}
ret <2 x i1> %cmp
}
-define <2 x i1> @cttz_ult_other_multiuse_v2i32(<2 x i32> %x, <2 x i32>* %p) {
+define <2 x i1> @cttz_ult_other_multiuse_v2i32(<2 x i32> %x, ptr %p) {
; CHECK-LABEL: @cttz_ult_other_multiuse_v2i32(
; CHECK-NEXT: [[TZ:%.*]] = tail call <2 x i32> @llvm.cttz.v2i32(<2 x i32> [[X:%.*]], i1 false)
-; CHECK-NEXT: store <2 x i32> [[TZ]], <2 x i32>* [[P:%.*]], align 8
+; CHECK-NEXT: store <2 x i32> [[TZ]], ptr [[P:%.*]], align 8
; CHECK-NEXT: [[CMP:%.*]] = icmp ult <2 x i32> [[TZ]], <i32 16, i32 16>
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%tz = tail call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %x, i1 false)
- store <2 x i32> %tz, <2 x i32>* %p
+ store <2 x i32> %tz, ptr %p
%cmp = icmp ult <2 x i32> %tz, <i32 16, i32 16>
ret <2 x i1> %cmp
}
ret <2 x i1> %cmp
}
-define i1 @ctpop_ugt_bitwidth_minus_one_i8(i8 %x, i8* %p) {
+define i1 @ctpop_ugt_bitwidth_minus_one_i8(i8 %x, ptr %p) {
; CHECK-LABEL: @ctpop_ugt_bitwidth_minus_one_i8(
; CHECK-NEXT: [[POP:%.*]] = tail call i8 @llvm.ctpop.i8(i8 [[X:%.*]]), [[RNG2:!range !.*]]
-; CHECK-NEXT: store i8 [[POP]], i8* [[P:%.*]], align 1
+; CHECK-NEXT: store i8 [[POP]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[X]], -1
; CHECK-NEXT: ret i1 [[CMP]]
;
%pop = tail call i8 @llvm.ctpop.i8(i8 %x)
- store i8 %pop, i8* %p
+ store i8 %pop, ptr %p
%cmp = icmp ugt i8 %pop, 7
ret i1 %cmp
}
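; Worked arithmetic for the fold above: on i8, 'ctpop(x) ugt 7' can only
; hold when all 8 bits are set, i.e. x == -1 -- hence the
; 'icmp eq i8 %x, -1' in the CHECK lines.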
ret i1 %r
}
-define <2 x i1> @shift_trunc_signbit_test_vec_uses(<2 x i17> %x, <2 x i17>* %p1, <2 x i13>* %p2) {
+define <2 x i1> @shift_trunc_signbit_test_vec_uses(<2 x i17> %x, ptr %p1, ptr %p2) {
; CHECK-LABEL: @shift_trunc_signbit_test_vec_uses(
; CHECK-NEXT: [[SH:%.*]] = lshr <2 x i17> [[X:%.*]], <i17 4, i17 4>
-; CHECK-NEXT: store <2 x i17> [[SH]], <2 x i17>* [[P1:%.*]], align 8
+; CHECK-NEXT: store <2 x i17> [[SH]], ptr [[P1:%.*]], align 8
; CHECK-NEXT: [[TR:%.*]] = trunc <2 x i17> [[SH]] to <2 x i13>
-; CHECK-NEXT: store <2 x i13> [[TR]], <2 x i13>* [[P2:%.*]], align 4
+; CHECK-NEXT: store <2 x i13> [[TR]], ptr [[P2:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = icmp sgt <2 x i17> [[X]], <i17 -1, i17 -1>
; CHECK-NEXT: ret <2 x i1> [[R]]
;
%sh = lshr <2 x i17> %x, <i17 4, i17 4>
- store <2 x i17> %sh, <2 x i17>* %p1
+ store <2 x i17> %sh, ptr %p1
%tr = trunc <2 x i17> %sh to <2 x i13>
- store <2 x i13> %tr, <2 x i13>* %p2
+ store <2 x i13> %tr, ptr %p2
%r = icmp sgt <2 x i13> %tr, <i13 -1, i13 -1>
ret <2 x i1> %r
}
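; Worked arithmetic for the fold above: lshr by 4 then trunc to i13 keeps
; bits 4..16 of the i17 input, so bit 12 of the truncation is bit 16 of x,
; its sign bit; 'tr sgt -1' therefore tests the sign of x itself -- hence
; the 'icmp sgt <2 x i17> %x, <i17 -1, i17 -1>' in the CHECK lines.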
%test1.struct = type { i32, i32 }
@test1.aligned_glbl = global %test1.struct zeroinitializer, align 4
-define void @test1(i64 *%ptr) {
+define void @test1(ptr %ptr) {
; CHECK-LABEL: @test1(
-; CHECK-NEXT: store i64 0, i64* [[PTR:%.*]], align 8
+; CHECK-NEXT: store i64 0, ptr [[PTR:%.*]], align 8
; CHECK-NEXT: ret void
;
- store i64 and (i64 ptrtoint (i32* getelementptr (%test1.struct, %test1.struct* @test1.aligned_glbl, i32 0, i32 1) to i64), i64 3), i64* %ptr
+ store i64 and (i64 ptrtoint (ptr getelementptr (%test1.struct, ptr @test1.aligned_glbl, i32 0, i32 1) to i64), i64 3), ptr %ptr
ret void
}
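; Worked arithmetic for the fold above: @test1.aligned_glbl is 4-byte
; aligned and field 1 of %test1.struct sits at offset 4, so the field's
; address is a multiple of 4; its two low bits are zero, the 'and ..., 3'
; folds to 0, and the store becomes 'store i64 0'.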
define i64 @OpenFilter(i64 %x) {
; CHECK-LABEL: @OpenFilter(
-; CHECK-NEXT: [[T:%.*]] = sub i64 [[X:%.*]], zext (i8 ptrtoint ([9 x i8]* @channel_wg4idx to i8) to i64)
+; CHECK-NEXT: [[T:%.*]] = sub i64 [[X:%.*]], zext (i8 ptrtoint (ptr @channel_wg4idx to i8) to i64)
; CHECK-NEXT: [[R:%.*]] = and i64 [[T]], 255
; CHECK-NEXT: ret i64 [[R]]
;
- %sub = sub i64 %x, ptrtoint ([9 x i8]* @channel_wg4idx to i64)
+ %sub = sub i64 %x, ptrtoint (ptr @channel_wg4idx to i64)
%t = trunc i64 %sub to i8
%r = zext i8 %t to i64
ret i64 %r
}
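; Worked arithmetic for @OpenFilter: zext(trunc(s)) to i64 equals
; 's & 255', and subtraction mod 256 depends only on the operands mod 256,
; so the subtrahend can be reduced to its low 8 bits first -- hence the
; 'sub' of 'zext (i8 ptrtoint ...)' followed by 'and ..., 255' in the
; CHECK lines.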
@G2 = global i32 42
@G3 = global [4 x i8] zeroinitializer, align 1
-@A1 = alias i32, bitcast (i8* getelementptr inbounds ([4 x i8], [4 x i8]* @G3, i32 0, i32 2) to i32*)
-@A2 = alias i32, inttoptr (i64 and (i64 ptrtoint (i8* getelementptr inbounds ([4 x i8], [4 x i8]* @G3, i32 0, i32 3) to i64), i64 -4) to i32*)
+@A1 = alias i32, getelementptr inbounds ([4 x i8], ptr @G3, i32 0, i32 2)
+@A2 = alias i32, inttoptr (i64 and (i64 ptrtoint (ptr getelementptr inbounds ([4 x i8], ptr @G3, i32 0, i32 3) to i64), i64 -4) to ptr)
define i64 @f1() {
; This cannot be constant folded because G1 is underaligned.
; CHECK-LABEL: @f1(
; CHECK: ret i64 and
- ret i64 and (i64 ptrtoint (i32* @G1 to i64), i64 1)
+ ret i64 and (i64 ptrtoint (ptr @G1 to i64), i64 1)
}
define i64 @f2() {
; The preferred alignment for G2 allows this one to fold to zero.
; CHECK-LABEL: @f2(
; CHECK: ret i64 0
- ret i64 and (i64 ptrtoint (i32* @G2 to i64), i64 1)
+ ret i64 and (i64 ptrtoint (ptr @G2 to i64), i64 1)
}
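; Worked arithmetic for @f2: the preferred alignment of an i32 global is
; at least 4 on typical datalayouts, so the low two address bits are known
; zero and 'ptrtoint & 1' folds to 0.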
define i64 @g1() {
; This cannot be constant folded because A1 aliases G3, which is underaligned.
; CHECK-LABEL: @g1(
; CHECK: ret i64 and
- ret i64 and (i64 ptrtoint (i32* @A1 to i64), i64 1)
+ ret i64 and (i64 ptrtoint (ptr @A1 to i64), i64 1)
}
define i64 @g2() {
; A2's definition masks the address with -4, guaranteeing a certain
; alignment and allowing this to fold to zero.
; CHECK-LABEL: @g2(
; CHECK: ret i64 0
- ret i64 and (i64 ptrtoint (i32* @A2 to i64), i64 1)
+ ret i64 and (i64 ptrtoint (ptr @A2 to i64), i64 1)
}
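; Worked arithmetic for @g2: A2's address is computed as
; 'and(ptrtoint ..., -4)', and (a & -4) & 1 == a & (-4 & 1) == 0, so the
; result folds to zero no matter how @G3 is actually aligned.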
define i32 @a() nounwind readnone {
entry:
- ret i32 zext (i1 icmp eq (i32 0, i32 ptrtoint (i32 ()* @a to i32)) to i32)
+ ret i32 zext (i1 icmp eq (i32 0, i32 ptrtoint (ptr @a to i32)) to i32)
}
; CHECK: ret i32 0
define void @frob() {
; CHECK-LABEL: @frob(
-; CHECK-NEXT: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 0), align 16
-; CHECK-NEXT: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 1), align 4
-; CHECK-NEXT: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 2), align 8
-; CHECK-NEXT: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 1, i64 0), align 4
-; CHECK-NEXT: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 1, i64 1), align 16
-; CHECK-NEXT: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 1, i64 2), align 4
-; CHECK-NEXT: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 1, i32 0, i64 0), align 8
-; CHECK-NEXT: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 1, i32 0, i64 1), align 4
-; CHECK-NEXT: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 1, i32 0, i64 2), align 16
-; CHECK-NEXT: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 1, i32 1, i64 0), align 4
-; CHECK-NEXT: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 1, i32 1, i64 1), align 8
-; CHECK-NEXT: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 1, i32 1, i64 2), align 4
-; CHECK-NEXT: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 2, i32 0, i64 0), align 16
-; CHECK-NEXT: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 2, i32 0, i64 1), align 4
-; CHECK-NEXT: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 2, i32 0, i64 2), align 8
-; CHECK-NEXT: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 2, i32 1, i64 0), align 4
-; CHECK-NEXT: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 2, i32 1, i64 1), align 16
-; CHECK-NEXT: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 2, i32 1, i64 2), align 4
-; CHECK-NEXT: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 1, i64 0, i32 0, i64 0), align 8
-; CHECK-NEXT: store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 2, i64 0, i32 0, i64 0), align 16
-; CHECK-NEXT: store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 1, i64 0, i32 0, i64 1), align 8
+; CHECK-NEXT: store i32 1, ptr @Y, align 16
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 1), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 2), align 8
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 1, i64 0), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 1, i64 1), align 16
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 1, i64 2), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 1, i32 0, i64 0), align 8
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 1, i32 0, i64 1), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 1, i32 0, i64 2), align 16
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 1, i32 1, i64 0), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 1, i32 1, i64 1), align 8
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 1, i32 1, i64 2), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 2, i32 0, i64 0), align 16
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 2, i32 0, i64 1), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 2, i32 0, i64 2), align 8
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 2, i32 1, i64 0), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 2, i32 1, i64 1), align 16
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 2, i32 1, i64 2), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 1, i64 0, i32 0, i64 0), align 8
+; CHECK-NEXT: store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 2, i64 0, i32 0, i64 0), align 16
+; CHECK-NEXT: store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 1, i64 0, i32 0, i64 1), align 8
; CHECK-NEXT: ret void
;
- store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 0), align 4
- store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 1), align 4
- store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 2), align 4
- store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 3), align 4
- store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 4), align 4
- store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 5), align 4
- store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 6), align 4
- store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 7), align 4
- store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 8), align 4
- store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 9), align 4
- store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 10), align 4
- store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 11), align 4
- store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 12), align 4
- store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 13), align 4
- store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 14), align 8
- store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 15), align 4
- store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 16), align 8
- store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 17), align 4
- store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 18), align 8
- store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 36), align 8
- store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 19), align 8
+ store i32 1, ptr @Y, align 4
+ store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 1), align 4
+ store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 2), align 4
+ store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 3), align 4
+ store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 4), align 4
+ store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 5), align 4
+ store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 6), align 4
+ store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 7), align 4
+ store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 8), align 4
+ store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 9), align 4
+ store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 10), align 4
+ store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 11), align 4
+ store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 12), align 4
+ store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 13), align 4
+ store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 14), align 8
+ store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 15), align 4
+ store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 16), align 8
+ store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 17), align 4
+ store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 18), align 8
+ store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 36), align 8
+ store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 19), align 8
ret void
}
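; A note on the inbounds markers above (assuming %struct.X is two
; [3 x i32] fields, as the gep indices suggest): @Y spans 18 i32 slots, so
; flattened indices up to 18 (one past the end) stay inbounds, while the
; i64 19 and i64 36 stores land beyond that point and their canonicalized
; geps must drop the inbounds keyword.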
; CHECK-NEXT: ret i64 1000
;
entry:
- %A = bitcast i8* getelementptr inbounds ([1000 x i8], [1000 x i8]* @X, i64 1, i64 0) to i8*
- %B = bitcast i8* getelementptr inbounds ([1000 x i8], [1000 x i8]* @X, i64 0, i64 0) to i8*
+ %A = bitcast ptr getelementptr inbounds ([1000 x i8], ptr @X, i64 1, i64 0) to ptr
- %B2 = ptrtoint i8* %B to i64
+ %B2 = ptrtoint ptr @X to i64
%C = sub i64 0, %B2
- %D = getelementptr i8, i8* %A, i64 %C
- %E = ptrtoint i8* %D to i64
+ %D = getelementptr i8, ptr %A, i64 %C
+ %E = ptrtoint ptr %D to i64
ret i64 %E
}
;
entry:
- %A = bitcast i8 addrspace(1)* getelementptr inbounds ([1000 x i8], [1000 x i8] addrspace(1)* @X_as1, i64 1, i64 0) to i8 addrspace(1)*
- %B = bitcast i8 addrspace(1)* getelementptr inbounds ([1000 x i8], [1000 x i8] addrspace(1)* @X_as1, i64 0, i64 0) to i8 addrspace(1)*
+ %A = bitcast ptr addrspace(1) getelementptr inbounds ([1000 x i8], ptr addrspace(1) @X_as1, i64 1, i64 0) to ptr addrspace(1)
- %B2 = ptrtoint i8 addrspace(1)* %B to i16
+ %B2 = ptrtoint ptr addrspace(1) @X_as1 to i16
%C = sub i16 0, %B2
- %D = getelementptr i8, i8 addrspace(1)* %A, i16 %C
- %E = ptrtoint i8 addrspace(1)* %D to i16
+ %D = getelementptr i8, ptr addrspace(1) %A, i16 %C
+ %E = ptrtoint ptr addrspace(1) %D to i16
ret i16 %E
}
; The i32 field sits at an offset of 8 bytes.
; Every element in the @CallerInfos array is 16-byte aligned, so
; any access from the following gep is 8-byte aligned.
-%struct.CallerInfo = type { i8*, i32 }
+%struct.CallerInfo = type { ptr, i32 }
@CallerInfos = global [128 x %struct.CallerInfo] zeroinitializer, align 16
define i32 @test_gep_in_struct(i64 %idx) {
; CHECK-LABEL: @test_gep_in_struct(
-; CHECK-NEXT: [[NS7:%.*]] = getelementptr inbounds [128 x %struct.CallerInfo], [128 x %struct.CallerInfo]* @CallerInfos, i64 0, i64 [[IDX:%.*]], i32 1
-; CHECK-NEXT: [[RES:%.*]] = load i32, i32* [[NS7]], align 8
+; CHECK-NEXT: [[NS7:%.*]] = getelementptr inbounds [128 x %struct.CallerInfo], ptr @CallerInfos, i64 0, i64 [[IDX:%.*]], i32 1
+; CHECK-NEXT: [[RES:%.*]] = load i32, ptr [[NS7]], align 8
; CHECK-NEXT: ret i32 [[RES]]
;
- %NS7 = getelementptr inbounds [128 x %struct.CallerInfo], [128 x %struct.CallerInfo]* @CallerInfos, i64 0, i64 %idx, i32 1
- %res = load i32, i32* %NS7, align 1
+ %NS7 = getelementptr inbounds [128 x %struct.CallerInfo], ptr @CallerInfos, i64 0, i64 %idx, i32 1
+ %res = load i32, ptr %NS7, align 1
ret i32 %res
}
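; Worked arithmetic for the alignment above (assuming 64-bit pointers):
; %struct.CallerInfo is 16 bytes (8-byte ptr, 4-byte i32, 4 bytes of
; padding), so field 1 of element %idx sits at base + 16*%idx + 8; with
; @CallerInfos 16-byte aligned, that address is always 8 mod 16, hence
; the 'align 8' on the load.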
@g2 = external global i8
declare i64 @get.i64()
-declare void @use.ptr(i8*)
+declare void @use.ptr(ptr)
-define i8* @gep_sub_self() {
+define ptr @gep_sub_self() {
; CHECK-LABEL: @gep_sub_self(
-; CHECK-NEXT: ret i8* getelementptr (i8, i8* @g, i64 sub (i64 0, i64 ptrtoint (i8* @g to i64)))
+; CHECK-NEXT: ret ptr getelementptr (i8, ptr @g, i64 sub (i64 0, i64 ptrtoint (ptr @g to i64)))
;
- %p.int = ptrtoint i8* @g to i64
+ %p.int = ptrtoint ptr @g to i64
%p.int.neg = sub i64 0, %p.int
- %p1 = getelementptr i8, i8* @g, i64 %p.int.neg
- ret i8* %p1
+ %p1 = getelementptr i8, ptr @g, i64 %p.int.neg
+ ret ptr %p1
}
-define i8* @gep_sub_self_plus_addr(i64 %addr) {
+define ptr @gep_sub_self_plus_addr(i64 %addr) {
; CHECK-LABEL: @gep_sub_self_plus_addr(
-; CHECK-NEXT: [[P2:%.*]] = getelementptr i8, i8* getelementptr (i8, i8* @g, i64 sub (i64 0, i64 ptrtoint (i8* @g to i64))), i64 [[ADDR:%.*]]
-; CHECK-NEXT: ret i8* [[P2]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr i8, ptr getelementptr (i8, ptr @g, i64 sub (i64 0, i64 ptrtoint (ptr @g to i64))), i64 [[ADDR:%.*]]
+; CHECK-NEXT: ret ptr [[P2]]
;
- %p.int = ptrtoint i8* @g to i64
+ %p.int = ptrtoint ptr @g to i64
%p.int.neg = sub i64 0, %p.int
- %p1 = getelementptr i8, i8* @g, i64 %p.int.neg
- %p2 = getelementptr i8, i8* %p1, i64 %addr
- ret i8* %p2
+ %p1 = getelementptr i8, ptr @g, i64 %p.int.neg
+ %p2 = getelementptr i8, ptr %p1, i64 %addr
+ ret ptr %p2
}
-define i8* @gep_plus_addr_sub_self(i64 %addr) {
+define ptr @gep_plus_addr_sub_self(i64 %addr) {
; CHECK-LABEL: @gep_plus_addr_sub_self(
-; CHECK-NEXT: [[P1:%.*]] = getelementptr i8, i8* @g, i64 [[ADDR:%.*]]
-; CHECK-NEXT: [[P2:%.*]] = getelementptr i8, i8* [[P1]], i64 sub (i64 0, i64 ptrtoint (i8* @g to i64))
-; CHECK-NEXT: ret i8* [[P2]]
+; CHECK-NEXT: [[P1:%.*]] = getelementptr i8, ptr @g, i64 [[ADDR:%.*]]
+; CHECK-NEXT: [[P2:%.*]] = getelementptr i8, ptr [[P1]], i64 sub (i64 0, i64 ptrtoint (ptr @g to i64))
+; CHECK-NEXT: ret ptr [[P2]]
;
- %p.int = ptrtoint i8* @g to i64
+ %p.int = ptrtoint ptr @g to i64
%p.int.neg = sub i64 0, %p.int
- %p1 = getelementptr i8, i8* @g, i64 %addr
- %p2 = getelementptr i8, i8* %p1, i64 %p.int.neg
- ret i8* %p2
+ %p1 = getelementptr i8, ptr @g, i64 %addr
+ %p2 = getelementptr i8, ptr %p1, i64 %p.int.neg
+ ret ptr %p2
}
-define i8* @gep_plus_addr_sub_self_in_loop() {
+define ptr @gep_plus_addr_sub_self_in_loop() {
; CHECK-LABEL: @gep_plus_addr_sub_self_in_loop(
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[ADDR:%.*]] = call i64 @get.i64()
-; CHECK-NEXT: [[P2:%.*]] = getelementptr i8, i8* getelementptr (i8, i8* @g, i64 sub (i64 0, i64 ptrtoint (i8* @g to i64))), i64 [[ADDR]]
-; CHECK-NEXT: call void @use.ptr(i8* [[P2]])
+; CHECK-NEXT: [[P2:%.*]] = getelementptr i8, ptr getelementptr (i8, ptr @g, i64 sub (i64 0, i64 ptrtoint (ptr @g to i64))), i64 [[ADDR]]
+; CHECK-NEXT: call void @use.ptr(ptr [[P2]])
; CHECK-NEXT: br label [[LOOP]]
;
- %p.int = ptrtoint i8* @g to i64
+ %p.int = ptrtoint ptr @g to i64
%p.int.neg = sub i64 0, %p.int
br label %loop
loop:
%addr = call i64 @get.i64()
- %p1 = getelementptr i8, i8* @g, i64 %addr
- %p2 = getelementptr i8, i8* %p1, i64 %p.int.neg
- call void @use.ptr(i8* %p2)
+ %p1 = getelementptr i8, ptr @g, i64 %addr
+ %p2 = getelementptr i8, ptr %p1, i64 %p.int.neg
+ call void @use.ptr(ptr %p2)
br label %loop
}
-define i8* @gep_sub_other() {
+define ptr @gep_sub_other() {
; CHECK-LABEL: @gep_sub_other(
-; CHECK-NEXT: ret i8* getelementptr (i8, i8* @g, i64 sub (i64 0, i64 ptrtoint (i8* @g2 to i64)))
+; CHECK-NEXT: ret ptr getelementptr (i8, ptr @g, i64 sub (i64 0, i64 ptrtoint (ptr @g2 to i64)))
;
- %p.int = ptrtoint i8* @g2 to i64
+ %p.int = ptrtoint ptr @g2 to i64
%p.int.neg = sub i64 0, %p.int
- %p1 = getelementptr i8, i8* @g, i64 %p.int.neg
- ret i8* %p1
+ %p1 = getelementptr i8, ptr @g, i64 %p.int.neg
+ ret ptr %p1
}
define i64 @gep_sub_other_to_int() {
; CHECK-LABEL: @gep_sub_other_to_int(
-; CHECK-NEXT: ret i64 sub (i64 ptrtoint (i8* @g to i64), i64 ptrtoint (i8* @g2 to i64))
+; CHECK-NEXT: ret i64 sub (i64 ptrtoint (ptr @g to i64), i64 ptrtoint (ptr @g2 to i64))
;
- %p.int = ptrtoint i8* @g2 to i64
+ %p.int = ptrtoint ptr @g2 to i64
%p.int.neg = sub i64 0, %p.int
- %p1 = getelementptr i8, i8* @g, i64 %p.int.neg
- %p1.int = ptrtoint i8* %p1 to i64
+ %p1 = getelementptr i8, ptr @g, i64 %p.int.neg
+ %p1.int = ptrtoint ptr %p1 to i64
ret i64 %p1.int
}
; CHECK: INSTCOMBINE ITERATION #1
; CHECK-NOT: INSTCOMBINE ITERATION #2
; OSS-Fuzz #14169
; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=14169
-define void @ossfuzz_14169_test1(i32* %a0) {
+define void @ossfuzz_14169_test1(ptr %a0) {
; CHECK-LABEL: @ossfuzz_14169_test1(
; CHECK-NEXT: bb:
; CHECK-NEXT: ret void
;
bb:
- %B = ptrtoint i32* @A to i64
+ %B = ptrtoint ptr @A to i64
%C = icmp sge i64 %B, 0
%X = select i1 %C, i712 0, i712 1
%B9 = lshr i712 %X, 146783911423364576743092537299333564210980159306769991919205685720763064069663027716481187399048043939495936
- %G5 = getelementptr i64, i64* undef, i712 %B9
- store i64* %G5, i64** undef
+ %G5 = getelementptr i64, ptr undef, i712 %B9
+ store ptr %G5, ptr undef
ret void
}
-define void @ossfuzz_14169_test2(i32* %a0) {
+define void @ossfuzz_14169_test2(ptr %a0) {
; CHECK-LABEL: @ossfuzz_14169_test2(
; CHECK-NEXT: bb:
; CHECK-NEXT: ret void
;
bb:
- %B = ptrtoint i32* @A to i64
+ %B = ptrtoint ptr @A to i64
%C = icmp sge i64 %B, 0
%X = select i1 %C, i712 0, i712 1
%B9 = shl i712 %X, 146783911423364576743092537299333564210980159306769991919205685720763064069663027716481187399048043939495936
- %G5 = getelementptr i64, i64* undef, i712 %B9
- store i64* %G5, i64** undef
+ %G5 = getelementptr i64, ptr undef, i712 %B9
+ store ptr %G5, ptr undef
ret void
}
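; Why the two tests above fold to 'ret void': the shift amount vastly
; exceeds the i712 bit width, so the lshr/shl produce poison and the
; dependent gep and store collapse away.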
ret i32 %a
}
-define i32 @indirect_call(i32 ()* %f) {
+define i32 @indirect_call(ptr %f) {
; CHECK: call i32 %f() [[$CONVERGENT_ATTR]]
%a = call i32 %f() convergent
ret i32 %a
; PR4908
-define void @test2(<1 x i16>* nocapture %b, i32* nocapture %c) nounwind ssp {
+define void @test2(ptr nocapture %b, ptr nocapture %c) nounwind ssp {
entry:
- %arrayidx = getelementptr inbounds <1 x i16>, <1 x i16>* %b, i64 0 ; <<1 x i16>*>
- %tmp2 = load <1 x i16>, <1 x i16>* %arrayidx ; <<1 x i16>> [#uses=1]
+ %arrayidx = getelementptr inbounds <1 x i16>, ptr %b, i64 0 ; <ptr>
+ %tmp2 = load <1 x i16>, ptr %arrayidx ; <<1 x i16>> [#uses=1]
%tmp6 = bitcast <1 x i16> %tmp2 to i16 ; <i16> [#uses=1]
%tmp7 = zext i16 %tmp6 to i32 ; <i32> [#uses=1]
%ins = or i32 0, %tmp7 ; <i32> [#uses=1]
- %arrayidx20 = getelementptr inbounds i32, i32* %c, i64 0 ; <i32*> [#uses=1]
- store i32 %ins, i32* %arrayidx20
+ %arrayidx20 = getelementptr inbounds i32, ptr %c, i64 0 ; <ptr> [#uses=1]
+ store i32 %ins, ptr %arrayidx20
ret void
}
; PR5262
-@tmp2 = global i64 0 ; <i64*> [#uses=1]
+@tmp2 = global i64 0 ; <ptr> [#uses=1]
declare void @use(i64) nounwind
; <label>:3 ; preds = %2, %1
%4 = phi i8 [ 1, %2 ], [ 0, %1 ] ; <i8> [#uses=1]
%5 = icmp eq i8 %4, 0 ; <i1> [#uses=1]
- %6 = load i64, i64* @tmp2, align 8 ; <i64> [#uses=1]
+ %6 = load i64, ptr @tmp2, align 8 ; <i64> [#uses=1]
%7 = select i1 %5, i64 0, i64 %6 ; <i64> [#uses=1]
br label %8
}
%t0 = type { i32, i32 }
-%t1 = type { i32, i32, i32, i32, i32* }
+%t1 = type { i32, i32, i32, i32, ptr }
-declare %t0* @bar2(i64)
+declare ptr @bar2(i64)
define void @bar3(i1, i1) nounwind align 2 {
; <label>:2
br i1 %1, label %10, label %3
; <label>:3 ; preds = %2
- %4 = getelementptr inbounds %t0, %t0* null, i64 0, i32 1 ; <i32*> [#uses=0]
- %5 = getelementptr inbounds %t1, %t1* null, i64 0, i32 4 ; <i32**> [#uses=1]
- %6 = load i32*, i32** %5, align 8 ; <i32*> [#uses=1]
- %7 = icmp ne i32* %6, null ; <i1> [#uses=1]
+ %4 = getelementptr inbounds %t0, ptr null, i64 0, i32 1 ; <ptr> [#uses=0]
+ %5 = getelementptr inbounds %t1, ptr null, i64 0, i32 4 ; <ptr> [#uses=1]
+ %6 = load ptr, ptr %5, align 8 ; <ptr> [#uses=1]
+ %7 = icmp ne ptr %6, null ; <i1> [#uses=1]
%8 = zext i1 %7 to i32 ; <i32> [#uses=1]
%9 = add i32 %8, 0 ; <i32> [#uses=1]
br label %10
; <label>:13 ; preds = %12, %10
%14 = zext i32 %11 to i64 ; <i64> [#uses=1]
- %15 = tail call %t0* @bar2(i64 %14) nounwind ; <%0*> [#uses=0]
+ %15 = tail call ptr @bar2(i64 %14) nounwind ; <ptr> [#uses=0]
ret void
}
; PR5262
; Make sure the PHI node gets put in a place where all of its operands dominate
; it.
-define i64 @test4(i1 %c, i64* %P) nounwind align 2 {
+define i64 @test4(i1 %c, ptr %P) nounwind align 2 {
BB0:
br i1 %c, label %BB1, label %BB2
BB2:
%v5_ = phi i1 [ true, %BB0], [false, %BB1]
- %v6 = load i64, i64* %P
+ %v6 = load i64, ptr %P
br label %l8
l8:
ret i32 0
}
-define void @test5(i1* %ptr) personality i32 (...)* @__gxx_personality_v0 {
- store i1 true, i1* %ptr
+define void @test5(ptr %ptr) personality ptr @__gxx_personality_v0 {
+ store i1 true, ptr %ptr
%r = invoke i32 @test5a() to label %exit unwind label %unwind
unwind:
- %exn = landingpad {i8*, i32}
+ %exn = landingpad {ptr, i32}
cleanup
br label %exit
exit:
; PR5673
-@test6g = external global i32*
+@test6g = external global ptr
-define arm_aapcs_vfpcc i32 @test6(i32 %argc, i8** %argv) nounwind {
+define arm_aapcs_vfpcc i32 @test6(i32 %argc, ptr %argv) nounwind {
entry:
- store i32* getelementptr (i32, i32* bitcast (i32 (i32, i8**)* @test6 to i32*), i32 -2048), i32** @test6g, align 4
+ store ptr getelementptr (i32, ptr @test6, i32 -2048), ptr @test6g, align 4
unreachable
}
; PR5827
-%class.RuleBasedBreakIterator = type { i64 ()* }
-%class.UStack = type { i8** }
+%class.RuleBasedBreakIterator = type { ptr }
+%class.UStack = type { ptr }
-define i32 @_ZN22RuleBasedBreakIterator15checkDictionaryEi(%class.RuleBasedBreakIterator* %this, i32 %x) align 2 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i32 @_ZN22RuleBasedBreakIterator15checkDictionaryEi(ptr %this, i32 %x) align 2 personality ptr @__gxx_personality_v0 {
entry:
- %breaks = alloca %class.UStack, align 4 ; <%class.UStack*> [#uses=3]
- call void @_ZN6UStackC1Ei(%class.UStack* %breaks, i32 0)
+ %breaks = alloca %class.UStack, align 4 ; <ptr> [#uses=3]
+ call void @_ZN6UStackC1Ei(ptr %breaks, i32 0)
%tobool = icmp ne i32 %x, 0 ; <i1> [#uses=1]
br i1 %tobool, label %cond.end, label %cond.false
terminate.handler: ; preds = %ehcleanup
- %exc = landingpad { i8*, i32 }
+ %exc = landingpad { ptr, i32 }
cleanup
call void @_ZSt9terminatev() noreturn nounwind
unreachable
ehcleanup: ; preds = %cond.false
- %exc1 = landingpad { i8*, i32 }
- catch i8* null
- invoke void @_ZN6UStackD1Ev(%class.UStack* %breaks)
+ %exc1 = landingpad { ptr, i32 }
+ catch ptr null
+ invoke void @_ZN6UStackD1Ev(ptr %breaks)
to label %cont unwind label %terminate.handler
cont: ; preds = %ehcleanup
- resume { i8*, i32 } %exc1
+ resume { ptr, i32 } %exc1
cond.false: ; preds = %entry
- %tmp4 = getelementptr inbounds %class.RuleBasedBreakIterator, %class.RuleBasedBreakIterator* %this, i32 0, i32 0 ; <i64 ()**> [#uses=1]
- %tmp5 = load i64 ()*, i64 ()** %tmp4 ; <i64 ()*> [#uses=1]
+ %tmp4 = getelementptr inbounds %class.RuleBasedBreakIterator, ptr %this, i32 0, i32 0 ; <ptr> [#uses=1]
+ %tmp5 = load ptr, ptr %tmp4 ; <ptr> [#uses=1]
%call = invoke i64 %tmp5()
to label %cond.end unwind label %ehcleanup ; <i64> [#uses=1]
cond.end: ; preds = %cond.false, %entry
%cond = phi i64 [ 0, %entry ], [ %call, %cond.false ] ; <i64> [#uses=1]
%conv = trunc i64 %cond to i32 ; <i32> [#uses=1]
- call void @_ZN6UStackD1Ev(%class.UStack* %breaks)
+ call void @_ZN6UStackD1Ev(ptr %breaks)
ret i32 %conv
}
-declare void @_ZN6UStackC1Ei(%class.UStack*, i32)
+declare void @_ZN6UStackC1Ei(ptr, i32)
-declare void @_ZN6UStackD1Ev(%class.UStack*)
+declare void @_ZN6UStackD1Ev(ptr)
declare i32 @__gxx_personality_v0(...)
declare void @_ZSt9terminatev()
-declare void @_Unwind_Resume_or_Rethrow(i8*)
+declare void @_Unwind_Resume_or_Rethrow(ptr)
; rdar://7590304
-define i8* @test10(i8* %self, i8* %tmp3, i1* %ptr1, i1* %ptr2) personality i32 (...)* @__gxx_personality_v0 {
+define ptr @test10(ptr %self, ptr %tmp3, ptr %ptr1, ptr %ptr2) personality ptr @__gxx_personality_v0 {
entry:
- store i1 true, i1* %ptr1
- store i1 true, i1* %ptr2
+ store i1 true, ptr %ptr1
+ store i1 true, ptr %ptr2
invoke void @test10a()
- to label %invoke.cont unwind label %try.handler ; <i8*> [#uses=0]
+ to label %invoke.cont unwind label %try.handler ; <ptr> [#uses=0]
invoke.cont: ; preds = %entry
unreachable
try.handler: ; preds = %entry
- %exn = landingpad {i8*, i32}
- catch i8* null
- ret i8* %self
+ %exn = landingpad {ptr, i32}
+ catch ptr null
+ ret ptr %self
}
define void @test10a() {
  ret void
}
; PR6503
-define void @test12(i32* %A) nounwind {
+define void @test12(ptr %A) nounwind {
entry:
- %tmp1 = load i32, i32* %A
+ %tmp1 = load i32, ptr %A
%cmp = icmp ugt i32 1, %tmp1 ; <i1> [#uses=1]
%conv = zext i1 %cmp to i32 ; <i32> [#uses=1]
- %tmp2 = load i32, i32* %A
+ %tmp2 = load i32, ptr %A
%cmp3 = icmp ne i32 %tmp2, 0 ; <i1> [#uses=1]
%conv4 = zext i1 %cmp3 to i32 ; <i32> [#uses=1]
%or = or i32 %conv, %conv4 ; <i32> [#uses=1]
%s1 = type { %s2, %s2, [6 x %s2], i32, i32, i32, [1 x i32], [0 x i8] }
%s2 = type { i64 }
-define void @test13(i32* %ptr1, i32* %ptr2, i32* %ptr3) nounwind {
+define void @test13(ptr %ptr1, ptr %ptr2, ptr %ptr3) nounwind {
entry:
- %0 = getelementptr inbounds %s1, %s1* null, i64 0, i32 2, i64 0, i32 0
- %1 = bitcast i64* %0 to i32*
- %2 = getelementptr inbounds %s1, %s1* null, i64 0, i32 2, i64 1, i32 0
- %.pre = load i32, i32* %1, align 8
- %3 = lshr i32 %.pre, 19
+ %0 = getelementptr inbounds %s1, ptr null, i64 0, i32 2, i64 0, i32 0
+ %1 = getelementptr inbounds %s1, ptr null, i64 0, i32 2, i64 1, i32 0
+ %.pre = load i32, ptr %0, align 8
+ %2 = lshr i32 %.pre, 19
%brmerge = or i1 1, 0
- %4 = and i32 %3, 3
- %5 = add nsw i32 %4, 1
- %6 = shl i32 %5, 19
- %7 = add i32 %6, 1572864
- %8 = and i32 %7, 1572864
- %9 = load i64, i64* %2, align 8
- %trunc156 = trunc i64 %9 to i32
- %10 = and i32 %trunc156, -1537
- %11 = and i32 %10, -6145
- %12 = or i32 %11, 2048
- %13 = and i32 %12, -24577
- %14 = or i32 %13, 16384
- %15 = or i32 %14, 98304
- store i32 %15, i32* %ptr1, align 8
- %16 = and i32 %15, -1572865
- %17 = or i32 %16, %8
- store i32 %17, i32* %ptr2, align 8
- %18 = and i32 %17, -449
- %19 = or i32 %18, 64
- store i32 %19, i32* %ptr3, align 8
+ %3 = and i32 %2, 3
+ %4 = add nsw i32 %3, 1
+ %5 = shl i32 %4, 19
+ %6 = add i32 %5, 1572864
+ %7 = and i32 %6, 1572864
+ %8 = load i64, ptr %1, align 8
+ %trunc156 = trunc i64 %8 to i32
+ %9 = and i32 %trunc156, -1537
+ %10 = and i32 %9, -6145
+ %11 = or i32 %10, 2048
+ %12 = and i32 %11, -24577
+ %13 = or i32 %12, 16384
+ %14 = or i32 %13, 98304
+ store i32 %14, ptr %ptr1, align 8
+ %15 = and i32 %14, -1572865
+ %16 = or i32 %15, %7
+ store i32 %16, ptr %ptr2, align 8
+ %17 = and i32 %16, -449
+ %18 = or i32 %17, 64
+ store i32 %18, ptr %ptr3, align 8
unreachable
}
; PR8807
-declare i32 @test14f(i8* (i8*)*) nounwind
+declare i32 @test14f(ptr) nounwind
-define void @test14(i32* %ptr) nounwind readnone {
+define void @test14(ptr %ptr) nounwind readnone {
entry:
- %tmp = bitcast i32 (i8* (i8*)*)* @test14f to i32 (i32*)*
- %call10 = call i32 %tmp(i32* byval(i32) %ptr)
+ %call10 = call i32 @test14f(ptr byval(i32) %ptr)
ret void
}
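; PR8807 exercises a call through a bitcast function pointer; with opaque
; pointers both parameter types are spelled ptr, so the cast has nothing
; left to do and the call binds @test14f directly. A minimal sketch of the
; same idea, using a hypothetical @takes_ptr callee:
declare i32 @takes_ptr(ptr)
define i32 @call_direct(ptr %p) {
  ; No "bitcast ... to i32 (i32*)*" step is needed before the call.
  %r = call i32 @takes_ptr(ptr %p)
  ret i32 %r
}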
; PR8896
@g_54 = external global [7 x i16]
-define void @test15(i32* %p_92, i1 %c1) nounwind {
+define void @test15(ptr %p_92, i1 %c1) nounwind {
entry:
-%0 = load i32, i32* %p_92, align 4
+%0 = load i32, ptr %p_92, align 4
%1 = icmp ne i32 %0, 0
%2 = zext i1 %1 to i32
%3 = call i32 @func_14() nounwind
; PR8983
%struct.basic_ios = type { i8 }
-define %struct.basic_ios *@test17() ssp {
+define ptr @test17() ssp {
entry:
- %add.ptr.i = getelementptr i8, i8* null, i64 0
- %0 = bitcast i8* %add.ptr.i to %struct.basic_ios*
- ret %struct.basic_ios* %0
+ ret ptr null
}
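; "getelementptr i8, ptr null, i64 0" is just the null pointer, and the
; struct bitcast that followed it is a no-op under opaque pointers, so the
; whole body folds to a bare "ret ptr null".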
; PR9013
; PR11275
declare void @test18b() noreturn
-declare void @test18foo(double**)
+declare void @test18foo(ptr)
declare void @test18a() noreturn
-define fastcc void @test18x(i8* %t0, i1 %b) uwtable align 2 personality i32 (...)* @__gxx_personality_v0 {
+define fastcc void @test18x(ptr %t0, i1 %b) uwtable align 2 personality ptr @__gxx_personality_v0 {
entry:
br i1 %b, label %e1, label %e2
e1:
- %t2 = bitcast i8* %t0 to double**
invoke void @test18b() noreturn
to label %u unwind label %lpad
e2:
- %t4 = bitcast i8* %t0 to double**
invoke void @test18a() noreturn
to label %u unwind label %lpad
lpad:
- %t5 = phi double** [ %t2, %e1 ], [ %t4, %e2 ]
- %lpad.nonloopexit262 = landingpad { i8*, i32 }
+ %t5 = phi ptr [ %t0, %e1 ], [ %t0, %e2 ]
+ %lpad.nonloopexit262 = landingpad { ptr, i32 }
cleanup
- call void @test18foo(double** %t5)
+ call void @test18foo(ptr %t5)
unreachable
u:
unreachable
}
-define <2 x i32> @mask_one_bit_splat(<2 x i32> %x, <2 x i32>* %p) {
+define <2 x i32> @mask_one_bit_splat(<2 x i32> %x, ptr %p) {
; CHECK-LABEL: @mask_one_bit_splat(
; CHECK-NEXT: [[A:%.*]] = and <2 x i32> [[X:%.*]], <i32 2048, i32 2048>
-; CHECK-NEXT: store <2 x i32> [[A]], <2 x i32>* [[P:%.*]], align 8
+; CHECK-NEXT: store <2 x i32> [[A]], ptr [[P:%.*]], align 8
; CHECK-NEXT: [[R:%.*]] = lshr exact <2 x i32> [[A]], <i32 11, i32 11>
; CHECK-NEXT: ret <2 x i32> [[R]]
;
%a = and <2 x i32> %x, <i32 2048, i32 2048>
- store <2 x i32> %a, <2 x i32>* %p
+ store <2 x i32> %a, ptr %p
%r = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %a)
ret <2 x i32> %r
}
ret <2 x i32> %res
}
-define <2 x i32> @ctpop_add_no_common_bits_vec_use(<2 x i32> %a, <2 x i32> %b, <2 x i32>* %p) {
+define <2 x i32> @ctpop_add_no_common_bits_vec_use(<2 x i32> %a, <2 x i32> %b, ptr %p) {
; CHECK-LABEL: @ctpop_add_no_common_bits_vec_use(
; CHECK-NEXT: [[SHL16:%.*]] = shl <2 x i32> [[A:%.*]], <i32 16, i32 16>
; CHECK-NEXT: [[CTPOP1:%.*]] = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> [[SHL16]])
; CHECK-NEXT: [[LSHL16:%.*]] = lshr <2 x i32> [[B:%.*]], <i32 16, i32 16>
; CHECK-NEXT: [[CTPOP2:%.*]] = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> [[LSHL16]])
-; CHECK-NEXT: store <2 x i32> [[CTPOP2]], <2 x i32>* [[P:%.*]], align 8
+; CHECK-NEXT: store <2 x i32> [[CTPOP2]], ptr [[P:%.*]], align 8
; CHECK-NEXT: [[RES:%.*]] = add nuw nsw <2 x i32> [[CTPOP1]], [[CTPOP2]]
; CHECK-NEXT: ret <2 x i32> [[RES]]
;
%ctpop1 = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %shl16)
%lshl16 = lshr <2 x i32> %b, <i32 16, i32 16>
%ctpop2 = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %lshl16)
- store <2 x i32> %ctpop2, <2 x i32>* %p
+ store <2 x i32> %ctpop2, ptr %p
%res = add <2 x i32> %ctpop1, %ctpop2
ret <2 x i32> %res
}
-define <2 x i32> @ctpop_add_no_common_bits_vec_use2(<2 x i32> %a, <2 x i32> %b, <2 x i32>* %p) {
+define <2 x i32> @ctpop_add_no_common_bits_vec_use2(<2 x i32> %a, <2 x i32> %b, ptr %p) {
; CHECK-LABEL: @ctpop_add_no_common_bits_vec_use2(
; CHECK-NEXT: [[SHL16:%.*]] = shl <2 x i32> [[A:%.*]], <i32 16, i32 16>
; CHECK-NEXT: [[CTPOP1:%.*]] = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> [[SHL16]])
-; CHECK-NEXT: store <2 x i32> [[CTPOP1]], <2 x i32>* [[P:%.*]], align 8
+; CHECK-NEXT: store <2 x i32> [[CTPOP1]], ptr [[P:%.*]], align 8
; CHECK-NEXT: [[LSHL16:%.*]] = lshr <2 x i32> [[B:%.*]], <i32 16, i32 16>
; CHECK-NEXT: [[CTPOP2:%.*]] = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> [[LSHL16]])
; CHECK-NEXT: [[RES:%.*]] = add nuw nsw <2 x i32> [[CTPOP1]], [[CTPOP2]]
;
%shl16 = shl <2 x i32> %a, <i32 16, i32 16>
%ctpop1 = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %shl16)
- store <2 x i32> %ctpop1, <2 x i32>* %p
+ store <2 x i32> %ctpop1, ptr %p
%lshl16 = lshr <2 x i32> %b, <i32 16, i32 16>
%ctpop2 = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %lshl16)
%res = add <2 x i32> %ctpop1, %ctpop2
ret <2 x i32> %res
}
-define <2 x i32> @sub_ctpop_vec_extra_use(<2 x i32> %a, <2 x i32>* %p) {
+define <2 x i32> @sub_ctpop_vec_extra_use(<2 x i32> %a, ptr %p) {
; CHECK-LABEL: @sub_ctpop_vec_extra_use(
; CHECK-NEXT: [[CNT:%.*]] = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> [[A:%.*]])
-; CHECK-NEXT: store <2 x i32> [[CNT]], <2 x i32>* [[P:%.*]], align 8
+; CHECK-NEXT: store <2 x i32> [[CNT]], ptr [[P:%.*]], align 8
; CHECK-NEXT: [[RES:%.*]] = sub nuw nsw <2 x i32> <i32 32, i32 32>, [[CNT]]
; CHECK-NEXT: ret <2 x i32> [[RES]]
;
%cnt = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %a)
- store <2 x i32> %cnt, <2 x i32>* %p
+ store <2 x i32> %cnt, ptr %p
%res = sub <2 x i32> <i32 32, i32 32>, %cnt
ret <2 x i32> %res
}
ret <2 x i32> %p
}
-define i32 @zext_ctpop_extra_use(i16 %x, i32* %q) {
+define i32 @zext_ctpop_extra_use(i16 %x, ptr %q) {
; CHECK-LABEL: @zext_ctpop_extra_use(
; CHECK-NEXT: [[Z:%.*]] = zext i16 [[X:%.*]] to i32
-; CHECK-NEXT: store i32 [[Z]], i32* [[Q:%.*]], align 4
+; CHECK-NEXT: store i32 [[Z]], ptr [[Q:%.*]], align 4
; CHECK-NEXT: [[P:%.*]] = call i32 @llvm.ctpop.i32(i32 [[Z]]), !range [[RNG4:![0-9]+]]
; CHECK-NEXT: ret i32 [[P]]
;
%z = zext i16 %x to i32
- store i32 %z, i32* %q
+ store i32 %z, ptr %q
%p = call i32 @llvm.ctpop.i32(i32 %z)
ret i32 %p
}
ret i32 %C.upgrd.1
}
-define i32* @test2(i32 %width) {
- %tmp = call i8* @llvm.stacksave( )
+define ptr @test2(i32 %width) {
+ %tmp = call ptr @llvm.stacksave( )
%tmp14 = alloca i32, i32 %width
- ret i32* %tmp14
+ ret ptr %tmp14
}
-declare i8* @llvm.stacksave()
+declare ptr @llvm.stacksave()
-declare void @llvm.lifetime.start.p0i8(i64, i8*)
-declare void @llvm.lifetime.end.p0i8(i64, i8*)
+declare void @llvm.lifetime.start.p0(i64, ptr)
+declare void @llvm.lifetime.end.p0(i64, ptr)
define void @test3() {
- call void @llvm.lifetime.start.p0i8(i64 -1, i8* undef)
- call void @llvm.lifetime.end.p0i8(i64 -1, i8* undef)
+ call void @llvm.lifetime.start.p0(i64 -1, ptr undef)
+ call void @llvm.lifetime.end.p0(i64 -1, ptr undef)
ret void
}
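; Note the intrinsic remangling above: the pointee type no longer appears
; in the name, so llvm.lifetime.start.p0i8 becomes llvm.lifetime.start.p0.
; A minimal usage sketch, assuming a hypothetical 16-byte buffer:
define void @lifetime_sketch() {
  %buf = alloca [16 x i8]
  call void @llvm.lifetime.start.p0(i64 16, ptr %buf)
  call void @llvm.lifetime.end.p0(i64 16, ptr %buf)
  ret void
}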
define void @foo() nounwind ssp !dbg !0 {
; CHECK: call i32 @putchar{{.+}} !dbg
- %1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i32 97), !dbg !5
+ %1 = call i32 (ptr, ...) @printf(ptr @.str, i32 97), !dbg !5
ret void, !dbg !7
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
!llvm.dbg.cu = !{!2}
!llvm.module.flags = !{!10}
; RUN: opt -passes=instcombine -S %s -o - | FileCheck %s
-; In this example, the cast from i8* to i32* becomes trivially dead. We should
+; In this example, the pointer bitcast becomes trivially dead. We should
; salvage its debug info.
; C source:
; void use_as_void(void *);
; void f(void *p) {
; int *q = (int *)p;
; use_as_void(q);
; }
target triple = "x86_64-pc-windows-msvc19.11.25508"
; Function Attrs: nounwind uwtable
-define void @f(i8* %p) !dbg !11 {
+define void @f(ptr %p) !dbg !11 {
entry:
- call void @llvm.dbg.value(metadata i8* %p, metadata !16, metadata !DIExpression()), !dbg !18
- %0 = bitcast i8* %p to i32*, !dbg !19
- call void @llvm.dbg.value(metadata i32* %0, metadata !17, metadata !DIExpression()), !dbg !20
- %1 = bitcast i32* %0 to i8*, !dbg !21
- call void @use_as_void(i8* %1), !dbg !22
+ call void @llvm.dbg.value(metadata ptr %p, metadata !16, metadata !DIExpression()), !dbg !18
+ call void @llvm.dbg.value(metadata ptr %p, metadata !17, metadata !DIExpression()), !dbg !20
+ call void @use_as_void(ptr %p), !dbg !22
ret void, !dbg !23
}
-; CHECK-LABEL: define void @f(i8* %p)
-; CHECK: call void @llvm.dbg.value(metadata i8* %p, metadata ![[P_VAR:[0-9]+]], metadata !DIExpression())
+; CHECK-LABEL: define void @f(ptr %p)
+; CHECK: call void @llvm.dbg.value(metadata ptr %p, metadata ![[P_VAR:[0-9]+]], metadata !DIExpression())
; CHECK-NOT: bitcast
-; CHECK: call void @llvm.dbg.value(metadata i8* %p, metadata ![[Q_VAR:[0-9]+]], metadata !DIExpression())
+; CHECK: call void @llvm.dbg.value(metadata ptr %p, metadata ![[Q_VAR:[0-9]+]], metadata !DIExpression())
; CHECK-NOT: bitcast
; CHECK: ret void
; CHECK: ![[P_VAR]] = !DILocalVariable(name: "p", {{.*}})
; CHECK: ![[Q_VAR]] = !DILocalVariable(name: "q", {{.*}})
-declare void @use_as_void(i8*)
+declare void @use_as_void(ptr)
declare void @llvm.dbg.value(metadata, metadata, metadata)
; CHECK-LABEL: @debug_local_scalable(
define <vscale x 2 x double> @debug_local_scalable(<vscale x 2 x double> %tostore) {
%vx = alloca <vscale x 2 x double>, align 16
- call void @llvm.dbg.declare(metadata <vscale x 2 x double>* %vx, metadata !3, metadata !DIExpression()), !dbg !5
- store <vscale x 2 x double> %tostore, <vscale x 2 x double>* %vx, align 16
- %ret = call <vscale x 2 x double> @f(<vscale x 2 x double>* %vx)
+ call void @llvm.dbg.declare(metadata ptr %vx, metadata !3, metadata !DIExpression()), !dbg !5
+ store <vscale x 2 x double> %tostore, ptr %vx, align 16
+ %ret = call <vscale x 2 x double> @f(ptr %vx)
ret <vscale x 2 x double> %ret
}
-declare <vscale x 2 x double> @f(<vscale x 2 x double>*)
+declare <vscale x 2 x double> @f(ptr)
define float @debug_scalablevec_bitcast_to_scalar() {
%v.addr = alloca <vscale x 4 x float>, align 16
- call void @llvm.dbg.declare(metadata <vscale x 4 x float>* %v.addr, metadata !3, metadata !DIExpression()), !dbg !5
- %a = bitcast <vscale x 4 x float>* %v.addr to float*
- %b = load float, float* %a, align 16
+ call void @llvm.dbg.declare(metadata ptr %v.addr, metadata !3, metadata !DIExpression()), !dbg !5
+ %b = load float, ptr %v.addr, align 16
ret float %b
}
; gets folded. The dbg.value should be duplicated in the block its sunk
; into, to maximise liveness.
;
-; CHECK-LABEL: define i32 @foo(i32*
-; CHECK: call void @llvm.dbg.value(metadata i32* %a, metadata !{{[0-9]+}},
+; CHECK-LABEL: define i32 @foo(ptr
+; CHECK: call void @llvm.dbg.value(metadata ptr %a, metadata !{{[0-9]+}},
; CHECK-SAME: metadata !DIExpression(DW_OP_plus_uconst, 4, DW_OP_stack_value))
; CHECK-NEXT: br label %sink1
-define i32 @foo(i32 *%a) !dbg !7 {
+define i32 @foo(ptr %a) !dbg !7 {
entry:
- %gep = getelementptr i32, i32 *%a, i32 1
- call void @llvm.dbg.value(metadata i32 *%gep, metadata !16, metadata !12), !dbg !15
+ %gep = getelementptr i32, ptr %a, i32 1
+ call void @llvm.dbg.value(metadata ptr %gep, metadata !16, metadata !12), !dbg !15
br label %sink1
sink1:
; CHECK-LABEL: sink1:
-; CHECK: call void @llvm.dbg.value(metadata i32* %gep,
+; CHECK: call void @llvm.dbg.value(metadata ptr %gep,
; CHECK-SAME: metadata !{{[0-9]+}}, metadata !DIExpression())
; CHECK-NEXT: load
- %0 = load i32, i32* %gep, align 4, !dbg !15
+ %0 = load i32, ptr %gep, align 4, !dbg !15
ret i32 %0, !dbg !15
}
; value range.
; CHECK-LABEL: define i32 @bar(
-; CHECK: call void @llvm.dbg.value(metadata <vscale x 4 x i32>* undef,
+; CHECK: call void @llvm.dbg.value(metadata ptr undef,
; CHECK-NEXT: br label %sink2
-define i32 @bar(<vscale x 4 x i32>* %a, i32 %b) !dbg !70 {
+define i32 @bar(ptr %a, i32 %b) !dbg !70 {
entry:
- %gep = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %a, i32 %b
- call void @llvm.dbg.value(metadata <vscale x 4 x i32>* %gep, metadata !73, metadata !12), !dbg !74
+ %gep = getelementptr <vscale x 4 x i32>, ptr %a, i32 %b
+ call void @llvm.dbg.value(metadata ptr %gep, metadata !73, metadata !12), !dbg !74
br label %sink2
sink2:
; CHECK-LABEL: sink2:
-; CHECK: call void @llvm.dbg.value(metadata <vscale x 4 x i32>* %gep,
+; CHECK: call void @llvm.dbg.value(metadata ptr %gep,
; CHECK-SAME: metadata !{{[0-9]+}}, metadata !DIExpression())
; CHECK-NEXT: load
; CHECK-NEXT: extractelement
; CHECK-NEXT: ret
- %0 = load <vscale x 4 x i32>, <vscale x 4 x i32>* %gep
+ %0 = load <vscale x 4 x i32>, ptr %gep
%extract = extractelement <vscale x 4 x i32> %0, i32 1
ret i32 %extract
}
; only the last use is cloned into the sunk block, and that both of the
; original dbg.values are salvaged.
;
-; CHECK-LABEL: define i32 @baz(i32*
-; CHECK: call void @llvm.dbg.value(metadata i32* %a, metadata !{{[0-9]+}},
+; CHECK-LABEL: define i32 @baz(ptr
+; CHECK: call void @llvm.dbg.value(metadata ptr %a, metadata !{{[0-9]+}},
; CHECK-SAME: metadata !DIExpression(DW_OP_plus_uconst, 4, DW_OP_stack_value))
-; CHECK-NEXT: call void @llvm.dbg.value(metadata i32* %a, metadata !{{[0-9]+}},
+; CHECK-NEXT: call void @llvm.dbg.value(metadata ptr %a, metadata !{{[0-9]+}},
; CHECK-SAME: metadata !DIExpression(DW_OP_plus_uconst, 4, DW_OP_plus_uconst, 5, DW_OP_stack_value))
; CHECK-NEXT: br label %sink1

-define i32 @baz(i32 *%a) !dbg !80 {
+define i32 @baz(ptr %a) !dbg !80 {
entry:
- %gep = getelementptr i32, i32 *%a, i32 1
- call void @llvm.dbg.value(metadata i32 *%gep, metadata !83, metadata !12), !dbg !84
- call void @llvm.dbg.value(metadata i32 *%gep, metadata !83, metadata !DIExpression(DW_OP_plus_uconst, 5)), !dbg !85
+ %gep = getelementptr i32, ptr %a, i32 1
+ call void @llvm.dbg.value(metadata ptr %gep, metadata !83, metadata !12), !dbg !84
+ call void @llvm.dbg.value(metadata ptr %gep, metadata !83, metadata !DIExpression(DW_OP_plus_uconst, 5)), !dbg !85
br label %sink1

sink1:
; CHECK-LABEL: sink1:
-; CHECK: call void @llvm.dbg.value(metadata i32* %gep,
+; CHECK: call void @llvm.dbg.value(metadata ptr %gep,
; CHECK-SAME: metadata !{{[0-9]+}}, metadata !DIExpression(DW_OP_plus_uconst, 5))
; CHECK-NEXT: load
- %0 = load i32, i32* %gep, align 4, !dbg !85
+ %0 = load i32, ptr %gep, align 4, !dbg !85
ret i32 %0, !dbg !85
}

define i32 @foo(i32 %j) #0 !dbg !7 {
entry:
%j.addr = alloca i32, align 4
- store i32 %j, i32* %j.addr, align 4
- call void @llvm.dbg.declare(metadata i32* %j.addr, metadata !11, metadata !12), !dbg !13
+ store i32 %j, ptr %j.addr, align 4
+ call void @llvm.dbg.declare(metadata ptr %j.addr, metadata !11, metadata !12), !dbg !13
call void @llvm.dbg.value(metadata i32 10, metadata !16, metadata !12), !dbg !15
- %0 = load i32, i32* %j.addr, align 4, !dbg !14
+ %0 = load i32, ptr %j.addr, align 4, !dbg !14
ret i32 %0, !dbg !15
}
ret void
}
-define void @test_ptrtoint(i64* %P) {
+define void @test_ptrtoint(ptr %P) {
; CHECK-LABEL: @test_ptrtoint
-; CHECK-NEXT: call void @llvm.dbg.value(metadata i64* %P, {{.*}}, metadata !DIExpression())
- %1 = ptrtoint i64* %P to i64
+; CHECK-NEXT: call void @llvm.dbg.value(metadata ptr %P, {{.*}}, metadata !DIExpression())
+ %1 = ptrtoint ptr %P to i64
ret void
}
declare void @llvm.dbg.declare(metadata, metadata, metadata) nounwind readnone
-declare i64 @llvm.objectsize.i64.p0i8(i8*, i1) nounwind readnone
+declare i64 @llvm.objectsize.i64.p0(ptr, i1) nounwind readnone
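; llvm.objectsize is remangled the same way: the .p0i8 suffix that encoded
; the pointee type becomes a bare .p0.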
-declare i8* @passthru_callee(i8*, i32, i64, i64)
+declare ptr @passthru_callee(ptr, i32, i64, i64)
-define i8* @passthru(i8* %a, i32 %b, i64 %c) !dbg !1 {
+define ptr @passthru(ptr %a, i32 %b, i64 %c) !dbg !1 {
entry:
- %a.addr = alloca i8*, align 8
+ %a.addr = alloca ptr, align 8
%b.addr = alloca i32, align 4
%c.addr = alloca i64, align 8
- store i8* %a, i8** %a.addr, align 8
- call void @llvm.dbg.declare(metadata i8** %a.addr, metadata !0, metadata !DIExpression()), !dbg !16
- store i32 %b, i32* %b.addr, align 4
- call void @llvm.dbg.declare(metadata i32* %b.addr, metadata !7, metadata !DIExpression()), !dbg !18
- store i64 %c, i64* %c.addr, align 8
- call void @llvm.dbg.declare(metadata i64* %c.addr, metadata !9, metadata !DIExpression()), !dbg !20
- %tmp = load i8*, i8** %a.addr, align 8, !dbg !21
- %tmp1 = load i32, i32* %b.addr, align 4, !dbg !21
- %tmp2 = load i64, i64* %c.addr, align 8, !dbg !21
- %tmp3 = load i8*, i8** %a.addr, align 8, !dbg !21
- %0 = call i64 @llvm.objectsize.i64.p0i8(i8* %tmp3, i1 false), !dbg !21
- %call = call i8* @passthru_callee(i8* %tmp, i32 %tmp1, i64 %tmp2, i64 %0), !dbg !21
- ret i8* %call, !dbg !21
+ store ptr %a, ptr %a.addr, align 8
+ call void @llvm.dbg.declare(metadata ptr %a.addr, metadata !0, metadata !DIExpression()), !dbg !16
+ store i32 %b, ptr %b.addr, align 4
+ call void @llvm.dbg.declare(metadata ptr %b.addr, metadata !7, metadata !DIExpression()), !dbg !18
+ store i64 %c, ptr %c.addr, align 8
+ call void @llvm.dbg.declare(metadata ptr %c.addr, metadata !9, metadata !DIExpression()), !dbg !20
+ %tmp = load ptr, ptr %a.addr, align 8, !dbg !21
+ %tmp1 = load i32, ptr %b.addr, align 4, !dbg !21
+ %tmp2 = load i64, ptr %c.addr, align 8, !dbg !21
+ %tmp3 = load ptr, ptr %a.addr, align 8, !dbg !21
+ %0 = call i64 @llvm.objectsize.i64.p0(ptr %tmp3, i1 false), !dbg !21
+ %call = call ptr @passthru_callee(ptr %tmp, i32 %tmp1, i64 %tmp2, i64 %0), !dbg !21
+ ret ptr %call, !dbg !21
}
-; CHECK-LABEL: define i8* @passthru(i8* %a, i32 %b, i64 %c)
+; CHECK-LABEL: define ptr @passthru(ptr %a, i32 %b, i64 %c)
; CHECK-NOT: alloca
; CHECK-NOT: store
; CHECK-NOT: call void @llvm.dbg.declare
-; CHECK: call void @llvm.dbg.value(metadata i8* %a, {{.*}})
+; CHECK: call void @llvm.dbg.value(metadata ptr %a, {{.*}})
; CHECK-NOT: store
; CHECK: call void @llvm.dbg.value(metadata i32 %b, {{.*}})
; CHECK-NOT: store
; CHECK: call void @llvm.dbg.value(metadata i64 %c, {{.*}})
; CHECK-NOT: store
-; CHECK: call i8* @passthru_callee(i8* %a, i32 %b, i64 %c, i64 %{{.*}})
+; CHECK: call ptr @passthru_callee(ptr %a, i32 %b, i64 %c, i64 %{{.*}})
declare void @tworegs_callee(i64, i64)
define void @tworegs(i64 %o.coerce0, i64 %o.coerce1) !dbg !31 {
entry:
%o = alloca %struct.TwoRegs, align 8
- %0 = bitcast %struct.TwoRegs* %o to { i64, i64 }*
- %1 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 0
- store i64 %o.coerce0, i64* %1, align 8
- %2 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 1
- store i64 %o.coerce1, i64* %2, align 8
- call void @llvm.dbg.declare(metadata %struct.TwoRegs* %o, metadata !35, metadata !DIExpression()), !dbg !32
- %3 = bitcast %struct.TwoRegs* %o to { i64, i64 }*, !dbg !33
- %4 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %3, i32 0, i32 0, !dbg !33
- %5 = load i64, i64* %4, align 8, !dbg !33
- %6 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %3, i32 0, i32 1, !dbg !33
- %7 = load i64, i64* %6, align 8, !dbg !33
- call void @tworegs_callee(i64 %5, i64 %7), !dbg !33
+ %0 = getelementptr inbounds { i64, i64 }, ptr %o, i32 0, i32 0
+ store i64 %o.coerce0, ptr %0, align 8
+ %1 = getelementptr inbounds { i64, i64 }, ptr %o, i32 0, i32 1
+ store i64 %o.coerce1, ptr %1, align 8
+ call void @llvm.dbg.declare(metadata ptr %o, metadata !35, metadata !DIExpression()), !dbg !32
+ %2 = getelementptr inbounds { i64, i64 }, ptr %o, i32 0, i32 0, !dbg !33
+ %3 = load i64, ptr %2, align 8, !dbg !33
+ %4 = getelementptr inbounds { i64, i64 }, ptr %o, i32 0, i32 1, !dbg !33
+ %5 = load i64, ptr %4, align 8, !dbg !33
+ call void @tworegs_callee(i64 %3, i64 %5), !dbg !33
ret void, !dbg !33
}
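; Both bitcasts of %o are gone: opaque pointers let the getelementptrs
; apply the { i64, i64 } view directly to the %struct.TwoRegs alloca, so
; only the SSA numbering changes.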
%struct.vm_object = type { i64 }
; Function Attrs: nounwind ssp
-define void @f(%struct.vm_object* %object, i64* nocapture readonly %start, i1 %c1) local_unnamed_addr #0 !dbg !11 {
+define void @f(ptr %object, ptr nocapture readonly %start, i1 %c1) local_unnamed_addr #0 !dbg !11 {
entry:
- tail call void @llvm.dbg.value(metadata %struct.vm_object* %object, metadata !21, metadata !DIExpression()), !dbg !27
- tail call void @llvm.dbg.value(metadata i64* %start, metadata !22, metadata !DIExpression()), !dbg !28
- %0 = load i64, i64* %start, align 4, !dbg !29
+ tail call void @llvm.dbg.value(metadata ptr %object, metadata !21, metadata !DIExpression()), !dbg !27
+ tail call void @llvm.dbg.value(metadata ptr %start, metadata !22, metadata !DIExpression()), !dbg !28
+ %0 = load i64, ptr %start, align 4, !dbg !29
tail call void @llvm.dbg.value(metadata i64 %0, metadata !25, metadata !DIExpression()), !dbg !30
%offset.08 = add i64 %0, -4096
tail call void @llvm.dbg.value(metadata i64 %offset.08, metadata !26, metadata !DIExpression()), !dbg !31
%offset.010 = phi i64 [ %offset.08, %for.body.lr.ph ], [ %offset.0, %for.body ]
%head_size.09 = phi i32 [ poison, %for.body.lr.ph ], [ %sub2, %for.body ]
tail call void @llvm.dbg.value(metadata i32 %head_size.09, metadata !23, metadata !DIExpression()), !dbg !31
- %call = tail call i32 bitcast (i32 (...)* @use to i32 (i64, %struct.vm_object*)*)(i64 %offset.010, %struct.vm_object* %object) #3, !dbg !34
+ %call = tail call i32 @use(i64 %offset.010, ptr %object) #3, !dbg !34
%sub2 = add i32 %head_size.09, -4096, !dbg !37
%offset.0 = add i64 %offset.010, -4096
tail call void @llvm.dbg.value(metadata i64 %offset.0, metadata !26, metadata !DIExpression()), !dbg !30
; RUN: opt -verify -instcombine < %s
%Foo = type <{ i8, x86_fp80 }>
-define i8 @t(%Foo* %arg) {
+define i8 @t(ptr %arg) {
entry:
- %0 = getelementptr %Foo, %Foo* %arg, i32 0, i32 0
- %1 = load i8, i8* %0, align 1
- ret i8 %1
+ %0 = load i8, ptr %arg, align 1
+ ret i8 %0
}
@empty = constant [1 x i8] c"\00", align 1
-declare i8* @strcat(i8*, i8*)
+declare ptr @strcat(ptr, ptr)
-define i8* @test_strcat(i8* %x) {
+define ptr @test_strcat(ptr %x) {
; CHECK-LABEL: @test_strcat(
- %empty = getelementptr [1 x i8], [1 x i8]* @empty, i32 0, i32 0
- %ret = call i8* @strcat(i8* %x, i8* %empty)
- ret i8* %ret
-; CHECK: call i8* @strcat
+ %ret = call ptr @strcat(ptr %x, ptr @empty)
+ ret ptr %ret
+; CHECK: call ptr @strcat
}
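; The zero-index getelementptr that materialized the string address folds
; away: @empty already decays to ptr, so the call takes the global directly.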
declare double @cos(double)
declare double @fabs(double)
declare double @floor(double)
-declare i8* @strcat(i8*, i8*)
-declare i8* @strncat(i8*, i8*, i32)
-declare i8* @strchr(i8*, i32)
-declare i8* @strrchr(i8*, i32)
-declare i32 @strcmp(i8*, i8*)
-declare i32 @strncmp(i8*, i8*, i64)
-declare i8* @strcpy(i8*, i8*)
-declare i8* @stpcpy(i8*, i8*)
-declare i8* @strncpy(i8*, i8*, i64)
-declare i64 @strlen(i8*)
-declare i8* @strpbrk(i8*, i8*)
-declare i64 @strspn(i8*, i8*)
-declare double @strtod(i8*, i8**)
-declare float @strtof(i8*, i8**)
-declare x86_fp80 @strtold(i8*, i8**)
-declare i64 @strtol(i8*, i8**, i32)
-declare i64 @strtoll(i8*, i8**, i32)
-declare i64 @strtoul(i8*, i8**, i32)
-declare i64 @strtoull(i8*, i8**, i32)
-declare i64 @strcspn(i8*, i8*)
+declare ptr @strcat(ptr, ptr)
+declare ptr @strncat(ptr, ptr, i32)
+declare ptr @strchr(ptr, i32)
+declare ptr @strrchr(ptr, i32)
+declare i32 @strcmp(ptr, ptr)
+declare i32 @strncmp(ptr, ptr, i64)
+declare ptr @strcpy(ptr, ptr)
+declare ptr @stpcpy(ptr, ptr)
+declare ptr @strncpy(ptr, ptr, i64)
+declare i64 @strlen(ptr)
+declare ptr @strpbrk(ptr, ptr)
+declare i64 @strspn(ptr, ptr)
+declare double @strtod(ptr, ptr)
+declare float @strtof(ptr, ptr)
+declare x86_fp80 @strtold(ptr, ptr)
+declare i64 @strtol(ptr, ptr, i32)
+declare i64 @strtoll(ptr, ptr, i32)
+declare i64 @strtoul(ptr, ptr, i32)
+declare i64 @strtoull(ptr, ptr, i32)
+declare i64 @strcspn(ptr, ptr)
declare i32 @abs(i32)
declare i32 @ffs(i32)
declare i32 @ffsl(i64)
declare i32 @ffsll(i64)
-declare i32 @fprintf(i8*, i8*)
+declare i32 @fprintf(ptr, ptr)
declare i32 @isascii(i32)
declare i32 @isdigit(i32)
declare i32 @toascii(i32)
declare i64 @labs(i64)
declare i64 @llabs(i64)
-declare i32 @printf(i8*)
-declare i32 @sprintf(i8*, i8*)
+declare i32 @printf(ptr)
+declare i32 @sprintf(ptr, ptr)
define double @t1(double %x) {
; CHECK-LABEL: @t1(
; CHECK: call double @floor
}
-define i8* @t6(i8* %x) {
+define ptr @t6(ptr %x) {
; CHECK-LABEL: @t6(
- %empty = getelementptr [1 x i8], [1 x i8]* @empty, i32 0, i32 0
- %ret = call i8* @strcat(i8* %x, i8* %empty)
- ret i8* %ret
-; CHECK: call i8* @strcat
+ %ret = call ptr @strcat(ptr %x, ptr @empty)
+ ret ptr %ret
+; CHECK: call ptr @strcat
}
-define i8* @t7(i8* %x) {
+define ptr @t7(ptr %x) {
; CHECK-LABEL: @t7(
- %empty = getelementptr [1 x i8], [1 x i8]* @empty, i32 0, i32 0
- %ret = call i8* @strncat(i8* %x, i8* %empty, i32 1)
- ret i8* %ret
-; CHECK: call i8* @strncat
+ %ret = call ptr @strncat(ptr %x, ptr @empty, i32 1)
+ ret ptr %ret
+; CHECK: call ptr @strncat
}
-define i8* @t8() {
+define ptr @t8() {
; CHECK-LABEL: @t8(
- %x = getelementptr inbounds [13 x i8], [13 x i8]* @.str1, i32 0, i32 0
- %ret = call i8* @strchr(i8* %x, i32 119)
- ret i8* %ret
-; CHECK: call i8* @strchr
+ %ret = call ptr @strchr(ptr @.str1, i32 119)
+ ret ptr %ret
+; CHECK: call ptr @strchr
}
-define i8* @t9() {
+define ptr @t9() {
; CHECK-LABEL: @t9(
- %x = getelementptr inbounds [13 x i8], [13 x i8]* @.str1, i32 0, i32 0
- %ret = call i8* @strrchr(i8* %x, i32 119)
- ret i8* %ret
-; CHECK: call i8* @strrchr
+ %ret = call ptr @strrchr(ptr @.str1, i32 119)
+ ret ptr %ret
+; CHECK: call ptr @strrchr
}
define i32 @t10() {
; CHECK-LABEL: @t10(
- %x = getelementptr inbounds [4 x i8], [4 x i8]* @.str2, i32 0, i32 0
- %y = getelementptr inbounds [4 x i8], [4 x i8]* @.str3, i32 0, i32 0
- %ret = call i32 @strcmp(i8* %x, i8* %y)
+ %ret = call i32 @strcmp(ptr @.str2, ptr @.str3)
ret i32 %ret
; CHECK: call i32 @strcmp
}
define i32 @t11() {
; CHECK-LABEL: @t11(
- %x = getelementptr inbounds [4 x i8], [4 x i8]* @.str2, i32 0, i32 0
- %y = getelementptr inbounds [4 x i8], [4 x i8]* @.str3, i32 0, i32 0
- %ret = call i32 @strncmp(i8* %x, i8* %y, i64 3)
+ %ret = call i32 @strncmp(ptr @.str2, ptr @.str3, i64 3)
ret i32 %ret
; CHECK: call i32 @strncmp
}
-define i8* @t12(i8* %x) {
+define ptr @t12(ptr %x) {
; CHECK-LABEL: @t12(
- %y = getelementptr inbounds [4 x i8], [4 x i8]* @.str2, i32 0, i32 0
- %ret = call i8* @strcpy(i8* %x, i8* %y)
- ret i8* %ret
-; CHECK: call i8* @strcpy
+ %ret = call ptr @strcpy(ptr %x, ptr @.str2)
+ ret ptr %ret
+; CHECK: call ptr @strcpy
}
-define i8* @t13(i8* %x) {
+define ptr @t13(ptr %x) {
; CHECK-LABEL: @t13(
- %y = getelementptr inbounds [4 x i8], [4 x i8]* @.str2, i32 0, i32 0
- %ret = call i8* @stpcpy(i8* %x, i8* %y)
- ret i8* %ret
-; CHECK: call i8* @stpcpy
+ %ret = call ptr @stpcpy(ptr %x, ptr @.str2)
+ ret ptr %ret
+; CHECK: call ptr @stpcpy
}
-define i8* @t14(i8* %x) {
+define ptr @t14(ptr %x) {
; CHECK-LABEL: @t14(
- %y = getelementptr inbounds [4 x i8], [4 x i8]* @.str2, i32 0, i32 0
- %ret = call i8* @strncpy(i8* %x, i8* %y, i64 3)
- ret i8* %ret
-; CHECK: call i8* @strncpy
+ %ret = call ptr @strncpy(ptr %x, ptr @.str2, i64 3)
+ ret ptr %ret
+; CHECK: call ptr @strncpy
}
define i64 @t15() {
; CHECK-LABEL: @t15(
- %x = getelementptr inbounds [4 x i8], [4 x i8]* @.str2, i32 0, i32 0
- %ret = call i64 @strlen(i8* %x)
+ %ret = call i64 @strlen(ptr @.str2)
ret i64 %ret
; CHECK: call i64 @strlen
}
-define i8* @t16(i8* %x) {
+define ptr @t16(ptr %x) {
; CHECK-LABEL: @t16(
- %y = getelementptr inbounds [1 x i8], [1 x i8]* @.str, i32 0, i32 0
- %ret = call i8* @strpbrk(i8* %x, i8* %y)
- ret i8* %ret
-; CHECK: call i8* @strpbrk
+ %ret = call ptr @strpbrk(ptr %x, ptr @.str)
+ ret ptr %ret
+; CHECK: call ptr @strpbrk
}
-define i64 @t17(i8* %x) {
+define i64 @t17(ptr %x) {
; CHECK-LABEL: @t17(
- %y = getelementptr inbounds [1 x i8], [1 x i8]* @.str, i32 0, i32 0
- %ret = call i64 @strspn(i8* %x, i8* %y)
+ %ret = call i64 @strspn(ptr %x, ptr @.str)
ret i64 %ret
; CHECK: call i64 @strspn
}
-define double @t18(i8** %y) {
+define double @t18(ptr %y) {
; CHECK-LABEL: @t18(
- %x = getelementptr inbounds [6 x i8], [6 x i8]* @.str4, i64 0, i64 0
- %ret = call double @strtod(i8* %x, i8** %y)
+ %ret = call double @strtod(ptr @.str4, ptr %y)
ret double %ret
; CHECK: call double @strtod
}
-define float @t19(i8** %y) {
+define float @t19(ptr %y) {
; CHECK-LABEL: @t19(
- %x = getelementptr inbounds [6 x i8], [6 x i8]* @.str4, i64 0, i64 0
- %ret = call float @strtof(i8* %x, i8** %y)
+ %ret = call float @strtof(ptr @.str4, ptr %y)
ret float %ret
; CHECK: call float @strtof
}
-define x86_fp80 @t20(i8** %y) {
+define x86_fp80 @t20(ptr %y) {
; CHECK-LABEL: @t20(
- %x = getelementptr inbounds [6 x i8], [6 x i8]* @.str4, i64 0, i64 0
- %ret = call x86_fp80 @strtold(i8* %x, i8** %y)
+ %ret = call x86_fp80 @strtold(ptr @.str4, ptr %y)
ret x86_fp80 %ret
; CHECK: call x86_fp80 @strtold
}
-define i64 @t21(i8** %y) {
+define i64 @t21(ptr %y) {
; CHECK-LABEL: @t21(
- %x = getelementptr inbounds [5 x i8], [5 x i8]* @.str5, i64 0, i64 0
- %ret = call i64 @strtol(i8* %x, i8** %y, i32 10)
+ %ret = call i64 @strtol(ptr @.str5, ptr %y, i32 10)
ret i64 %ret
; CHECK: call i64 @strtol
}
-define i64 @t22(i8** %y) {
+define i64 @t22(ptr %y) {
; CHECK-LABEL: @t22(
- %x = getelementptr inbounds [5 x i8], [5 x i8]* @.str5, i64 0, i64 0
- %ret = call i64 @strtoll(i8* %x, i8** %y, i32 10)
+ %ret = call i64 @strtoll(ptr @.str5, ptr %y, i32 10)
ret i64 %ret
; CHECK: call i64 @strtoll
}
-define i64 @t23(i8** %y) {
+define i64 @t23(ptr %y) {
; CHECK-LABEL: @t23(
- %x = getelementptr inbounds [5 x i8], [5 x i8]* @.str5, i64 0, i64 0
- %ret = call i64 @strtoul(i8* %x, i8** %y, i32 10)
+ %ret = call i64 @strtoul(ptr @.str5, ptr %y, i32 10)
ret i64 %ret
; CHECK: call i64 @strtoul
}
-define i64 @t24(i8** %y) {
+define i64 @t24(ptr %y) {
; CHECK-LABEL: @t24(
- %x = getelementptr inbounds [5 x i8], [5 x i8]* @.str5, i64 0, i64 0
- %ret = call i64 @strtoull(i8* %x, i8** %y, i32 10)
+ %ret = call i64 @strtoull(ptr @.str5, ptr %y, i32 10)
ret i64 %ret
; CHECK: call i64 @strtoull
}
-define i64 @t25(i8* %y) {
+define i64 @t25(ptr %y) {
; CHECK-LABEL: @t25(
- %x = getelementptr [1 x i8], [1 x i8]* @empty, i32 0, i32 0
- %ret = call i64 @strcspn(i8* %x, i8* %y)
+ %ret = call i64 @strcspn(ptr @empty, ptr %y)
ret i64 %ret
; CHECK: call i64 @strcspn
}
define void @t30() {
; CHECK-LABEL: @t30(
- %x = getelementptr inbounds [13 x i8], [13 x i8]* @.str1, i32 0, i32 0
- call i32 @fprintf(i8* null, i8* %x)
+ call i32 @fprintf(ptr null, ptr @.str1)
ret void
; CHECK: call i32 @fprintf
}
define void @t36() {
; CHECK-LABEL: @t36(
- %x = getelementptr inbounds [1 x i8], [1 x i8]* @empty, i32 0, i32 0
- call i32 @printf(i8* %x)
+ call i32 @printf(ptr @empty)
ret void
; CHECK: call i32 @printf
}
-define void @t37(i8* %x) {
+define void @t37(ptr %x) {
; CHECK-LABEL: @t37(
- %y = getelementptr inbounds [13 x i8], [13 x i8]* @.str1, i32 0, i32 0
- call i32 @sprintf(i8* %x, i8* %y)
+ call i32 @sprintf(ptr %x, ptr @.str1)
ret void
; CHECK: call i32 @sprintf
}
br label %for.cond.i
for.cond.i: ; preds = %land.lhs.true, %entry
- %0 = getelementptr inbounds %struct.S0.0.1.2.3.4.13.22.31.44.48.53.54.55.56.58.59.60.66.68.70.74.77.106.107.108.109.110.113.117.118.128.129, %struct.S0.0.1.2.3.4.13.22.31.44.48.53.54.55.56.58.59.60.66.68.70.74.77.106.107.108.109.110.113.117.118.128.129* %l_819.i.i, i64 0, i32 0
br label %for.cond.i6.i.i
for.cond.i6.i.i: ; preds = %for.body.i8.i.i, %for.cond.i
br label %for.cond1.i.i.i.i
func_39.exit.i.i: ; preds = %for.cond1.i.i.i.i
- %l_8191.sroa.0.0.copyload.i.i = load i64, i64* %0, align 1
+ %l_8191.sroa.0.0.copyload.i.i = load i64, ptr %l_819.i.i, align 1
br label %for.cond1.i.i.i
for.cond1.i.i.i: ; preds = %safe_div_func_uint32_t_u_u.exit.i.i.i, %func_39.exit.i.i
ret <2 x i32> %mul
}
-define i32 @test37(i32* %b, i1 %c1) {
+define i32 @test37(ptr %b, i1 %c1) {
; CHECK-LABEL: @test37(
; CHECK-NEXT: entry:
-; CHECK-NEXT: store i32 0, i32* [[B:%.*]], align 4
+; CHECK-NEXT: store i32 0, ptr [[B:%.*]], align 4
; CHECK-NEXT: br i1 [[C1:%.*]], label [[LOR_RHS:%.*]], label [[LOR_END:%.*]]
; CHECK: lor.rhs:
; CHECK-NEXT: br label [[LOR_END]]
; CHECK-NEXT: ret i32 0
;
entry:
- store i32 0, i32* %b, align 4
- %0 = load i32, i32* %b, align 4
+ store i32 0, ptr %b, align 4
+ %0 = load i32, ptr %b, align 4
br i1 %c1, label %lor.rhs, label %lor.end
lor.rhs: ; preds = %entry
define float @test_no_shrink_intrin_floor_multi_use_fpext(half %C) {
; CHECK-LABEL: @test_no_shrink_intrin_floor_multi_use_fpext(
; CHECK-NEXT: [[D:%.*]] = fpext half [[C:%.*]] to double
-; CHECK-NEXT: store volatile double [[D]], double* undef, align 8
+; CHECK-NEXT: store volatile double [[D]], ptr undef, align 8
; CHECK-NEXT: [[E:%.*]] = call double @llvm.floor.f64(double [[D]])
; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
; CHECK-NEXT: ret float [[F]]
;
%D = fpext half %C to double
- store volatile double %D, double* undef
+ store volatile double %D, ptr undef
%E = call double @llvm.floor.f64(double %D)
%F = fptrunc double %E to float
ret float %F
define float @test_no_shrink_intrin_fabs_multi_use_fpext(half %C) {
; CHECK-LABEL: @test_no_shrink_intrin_fabs_multi_use_fpext(
; CHECK-NEXT: [[D:%.*]] = fpext half [[C:%.*]] to double
-; CHECK-NEXT: store volatile double [[D]], double* undef, align 8
+; CHECK-NEXT: store volatile double [[D]], ptr undef, align 8
; CHECK-NEXT: [[E:%.*]] = call double @llvm.fabs.f64(double [[D]])
; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
; CHECK-NEXT: ret float [[F]]
;
%D = fpext half %C to double
- store volatile double %D, double* undef
+ store volatile double %D, ptr undef
%E = call double @llvm.fabs.f64(double %D)
%F = fptrunc double %E to float
ret float %F
; CHECK-LABEL: @foo(
; CHECK: alloca
; CHECK: align 16
- %2 = alloca [3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>], align 16 ; <[3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>]*> [#uses=1]
- %3 = getelementptr [3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>], [3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>]* %2, i32 0, i32 0 ; <<{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>*> [#uses=1]
- %4 = getelementptr <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>, <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>* %3, i32 0, i32 0 ; <{ { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } }*> [#uses=1]
- %5 = getelementptr { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } }, { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } }* %4, i32 0, i32 0 ; <{ [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 }*> [#uses=1]
- %6 = bitcast { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 }* %5 to { [8 x i16] }* ; <{ [8 x i16] }*> [#uses=1]
- %7 = getelementptr { [8 x i16] }, { [8 x i16] }* %6, i32 0, i32 0 ; <[8 x i16]*> [#uses=1]
- %8 = getelementptr [8 x i16], [8 x i16]* %7, i32 0, i32 0 ; <i16*> [#uses=1]
- store i16 0, i16* %8, align 16
- call void @bar(i16* %8)
+ %2 = alloca [3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>], align 16 ; <ptr> [#uses=1]
+ %3 = getelementptr [3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>], ptr %2, i32 0, i32 0 ; <ptr> [#uses=1]
+ %4 = getelementptr <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>, ptr %3, i32 0, i32 0 ; <ptr> [#uses=1]
+ %5 = getelementptr { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } }, ptr %4, i32 0, i32 0 ; <ptr> [#uses=1]
+ %6 = getelementptr { [8 x i16] }, ptr %5, i32 0, i32 0 ; <ptr> [#uses=1]
+ %7 = getelementptr [8 x i16], ptr %6, i32 0, i32 0 ; <ptr> [#uses=1]
+ store i16 0, ptr %7, align 16
+ call void @bar(ptr %7)
ret void
}
-declare void @bar(i16*)
+declare void @bar(ptr)
-define void @foo_as1(i32 %a, [3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>] addrspace(1)* %b) {
+define void @foo_as1(i32 %a, ptr addrspace(1) %b) {
; CHECK-LABEL: @foo_as1(
; CHECK: align 16
- %1 = getelementptr [3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>], [3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>] addrspace(1)* %b, i32 0, i32 0 ; <<{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>*> [#uses=1]
- %2 = getelementptr <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>, <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }> addrspace(1)* %1, i32 0, i32 0 ; <{ { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } }*> [#uses=1]
- %3 = getelementptr { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } }, { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } addrspace(1)* %2, i32 0, i32 0 ; <{ [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 }*> [#uses=1]
- %4 = bitcast { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } addrspace(1)* %3 to { [8 x i16] } addrspace(1)* ; <{ [8 x i16] }*> [#uses=1]
- %5 = getelementptr { [8 x i16] }, { [8 x i16] } addrspace(1)* %4, i32 0, i32 0 ; <[8 x i16]*> [#uses=1]
- %6 = getelementptr [8 x i16], [8 x i16] addrspace(1)* %5, i32 0, i32 0 ; <i16*> [#uses=1]
- store i16 0, i16 addrspace(1)* %6, align 16
- call void @bar_as1(i16 addrspace(1)* %6)
+ %1 = getelementptr [3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>], ptr addrspace(1) %b, i32 0, i32 0 ; <ptr> [#uses=1]
+ %2 = getelementptr <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>, ptr addrspace(1) %1, i32 0, i32 0 ; <ptr> [#uses=1]
+ %3 = getelementptr { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } }, ptr addrspace(1) %2, i32 0, i32 0 ; <ptr> [#uses=1]
+ %4 = getelementptr { [8 x i16] }, ptr addrspace(1) %3, i32 0, i32 0 ; <ptr> [#uses=1]
+ %5 = getelementptr [8 x i16], ptr addrspace(1) %4, i32 0, i32 0 ; <ptr> [#uses=1]
+ store i16 0, ptr addrspace(1) %5, align 16
+ call void @bar_as1(ptr addrspace(1) %5)
ret void
}
-declare void @bar_as1(i16 addrspace(1)*)
+declare void @bar_as1(ptr addrspace(1))
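; Address spaces survive the migration: ptr addrspace(1) stays distinct
; from plain ptr, and crossing between them still needs an explicit cast.
; A minimal sketch, using a hypothetical @as_example:
define ptr @as_example(ptr addrspace(1) %p) {
  %q = addrspacecast ptr addrspace(1) %p to ptr
  ret ptr %q
}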
; This example was reduced from a test case in which InstCombine ran at least
; twice:
; - The first InstCombine run converted dbg.declares to dbg.values using the
-; LowerDbgDeclare utility. This produced a dbg.value(i32* %2, DW_OP_deref)
+; LowerDbgDeclare utility. This produced a dbg.value(ptr %2, DW_OP_deref)
; (this happens when the contents of an alloca are passed by-value), and a
; dbg.value(i32 %0) (due to the store of %0 into the alloca).
; - The second InstCombine run deleted the alloca (%2).
;
; RUN-ONCE-LABEL: @t1(
; RUN-ONCE-NEXT: llvm.dbg.value(metadata i32 %0, metadata [[t1_arg0:![0-9]+]], metadata !DIExpression())
-; RUN-ONCE-NEXT: llvm.dbg.value(metadata i32* undef, metadata [[t1_fake_ptr:![0-9]+]], metadata !DIExpression())
+; RUN-ONCE-NEXT: llvm.dbg.value(metadata ptr undef, metadata [[t1_fake_ptr:![0-9]+]], metadata !DIExpression())
; RUN-ONCE-NEXT: ret void
define void @t1(i32) !dbg !9 {
%2 = alloca i32, align 4
- store i32 %0, i32* %2, align 4
+ store i32 %0, ptr %2, align 4
call void @llvm.dbg.value(metadata i32 %0, metadata !14, metadata !DIExpression()), !dbg !15
- call void @llvm.dbg.value(metadata i32* %2, metadata !14, metadata !DIExpression(DW_OP_deref)), !dbg !15
- call void @llvm.dbg.value(metadata i32* %2, metadata !20, metadata !DIExpression()), !dbg !15
+ call void @llvm.dbg.value(metadata ptr %2, metadata !14, metadata !DIExpression(DW_OP_deref)), !dbg !15
+ call void @llvm.dbg.value(metadata ptr %2, metadata !20, metadata !DIExpression()), !dbg !15
ret void
}
; been produced by a frontend compiling at -O0.
;
; Here's what happens:
-; 1) We run InstCombine. This puts a dbg.value(i32* %x.addr, DW_OP_deref)
+; 1) We run InstCombine. This puts a dbg.value(ptr %x.addr, DW_OP_deref)
; before the call to @use, and a dbg.value(i32 %x) after the store.
; 2) We inline @use.
; 3) We run InstCombine again. The alloca %x.addr is erased. We should just get
-; dbg.value(i32 %x). There should be no leftover dbg.value(metadata i32*
+; dbg.value(i32 %x). There should be no leftover dbg.value(metadata ptr
; undef).
;
-;;; define void @use(i32* %addr) alwaysinline { ret void }
+;;; define void @use(ptr %addr) alwaysinline { ret void }
;;; define void @t2(i32 %x) !dbg !17 {
;;; %x.addr = alloca i32, align 4
-;;; store i32 %x, i32* %x.addr, align 4
-;;; call void @llvm.dbg.declare(metadata i32* %x.addr, metadata !18, metadata !DIExpression()), !dbg !19
-;;; call void @use(i32* %x.addr)
+;;; store i32 %x, ptr %x.addr, align 4
+;;; call void @llvm.dbg.declare(metadata ptr %x.addr, metadata !18, metadata !DIExpression()), !dbg !19
+;;; call void @use(ptr %x.addr)
;;; ret void
;;; }
; This would crash/assert because the logic for collectShuffleElements()
; does not consider the possibility of invalid insert/extract operands.
-define <4 x double> @invalid_extractelement(<2 x double> %a, <4 x double> %b, double* %p) {
+define <4 x double> @invalid_extractelement(<2 x double> %a, <4 x double> %b, ptr %p) {
; ANY-LABEL: @invalid_extractelement(
; ANY-NEXT: [[TMP1:%.*]] = shufflevector <2 x double> [[A:%.*]], <2 x double> poison, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
; ANY-NEXT: [[T4:%.*]] = shufflevector <4 x double> [[B:%.*]], <4 x double> [[TMP1]], <4 x i32> <i32 undef, i32 1, i32 4, i32 3>
; ANY-NEXT: [[E:%.*]] = extractelement <4 x double> [[B]], i64 1
-; ANY-NEXT: store double [[E]], double* [[P:%.*]], align 8
+; ANY-NEXT: store double [[E]], ptr [[P:%.*]], align 8
; ANY-NEXT: ret <4 x double> [[T4]]
;
%t3 = extractelement <2 x double> %a, i32 0
%t4 = insertelement <4 x double> %b, double %t3, i32 2
%e = extractelement <4 x double> %t4, i32 1
- store double %e, double* %p
+ store double %e, ptr %p
%e1 = extractelement <2 x double> %a, i32 4 ; invalid index
%r = insertelement <4 x double> %t4, double %e1, i64 0
ret <4 x double> %r
%Complex = type { double, double }
; Check that instcombine preserves TBAA when narrowing loads
-define double @teststructextract(%Complex *%val) {
+define double @teststructextract(ptr %val) {
; CHECK: load double, {{.*}}, !tbaa
; CHECK-NOT: load %Complex
- %loaded = load %Complex, %Complex *%val, !tbaa !1
+ %loaded = load %Complex, ptr %val, !tbaa !1
%real = extractvalue %Complex %loaded, 0
ret double %real
}
-define double @testarrayextract([2 x double] *%val) {
+define double @testarrayextract(ptr %val) {
; CHECK: load double, {{.*}}, !tbaa
; CHECK-NOT: load [2 x double]
- %loaded = load [2 x double], [2 x double] *%val, !tbaa !1
+ %loaded = load [2 x double], ptr %val, !tbaa !1
%real = extractvalue [2 x double] %loaded, 0
ret double %real
}
; Check that inscombine preserves TBAA when breaking up stores
-define void @teststructinsert(%Complex *%loc, double %a, double %b) {
+define void @teststructinsert(ptr %loc, double %a, double %b) {
; CHECK: store double %a, {{.*}}, !tbaa
; CHECK: store double %b, {{.*}}, !tbaa
; CHECK-NOT: store %Complex
%inserted = insertvalue %Complex undef, double %a, 0
%inserted2 = insertvalue %Complex %inserted, double %b, 1
- store %Complex %inserted2, %Complex *%loc, !tbaa !1
+ store %Complex %inserted2, ptr %loc, !tbaa !1
ret void
}
-define void @testarrayinsert([2 x double] *%loc, double %a, double %b) {
+define void @testarrayinsert(ptr %loc, double %a, double %b) {
; CHECK: store double %a, {{.*}}, !tbaa
; CHECK: store double %b, {{.*}}, !tbaa
; CHECK-NOT: store [2 x double]
%inserted = insertvalue [2 x double] undef, double %a, 0
%inserted2 = insertvalue [2 x double] %inserted, double %b, 1
- store [2 x double] %inserted2, [2 x double] *%loc, !tbaa !1
+ store [2 x double] %inserted2, ptr %loc, !tbaa !1
ret void
}
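; In all four cases the !tbaa tag from the aggregate access must be copied
; onto each narrowed scalar load and store, which is what the CHECK lines
; verify.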
}
; CHECK-LABEL: define i32 @extract2gep(
-; CHECK-NEXT: [[GEP:%[a-z0-9]+]] = getelementptr inbounds {{.*}}, {{.*}}* %pair, i64 0, i32 1
-; CHECK-NEXT: [[LOAD:%[A-Za-z0-9]+]] = load i32, i32* [[GEP]]
+; CHECK-NEXT: [[GEP:%[a-z0-9]+]] = getelementptr inbounds {{.*}}, ptr %pair, i64 0, i32 1
+; CHECK-NEXT: [[LOAD:%[A-Za-z0-9]+]] = load i32, ptr [[GEP]]
; CHECK-NEXT: store
; CHECK-NEXT: br label %loop
; CHECK-NOT: extractvalue
; CHECK: call {{.*}}(i32 [[LOAD]])
; CHECK-NOT: extractvalue
; CHECK: ret i32 [[LOAD]]
-define i32 @extract2gep({i16, i32}* %pair, i32* %P) {
+define i32 @extract2gep(ptr %pair, ptr %P) {
; The load + extractvalue should be converted
; to an inbounds gep + smaller load.
; The new load should be in the same spot as the old load.
- %L = load {i16, i32}, {i16, i32}* %pair
- store i32 0, i32* %P
+ %L = load {i16, i32}, ptr %pair
+ store i32 0, ptr %P
br label %loop
loop:
%E = extractvalue {i16, i32} %L, 1
%C = call i32 @baz(i32 %E)
- store i32 %C, i32* %P
+ store i32 %C, ptr %P
%cond = icmp eq i32 %C, 0
br i1 %cond, label %end, label %loop
}
; CHECK-LABEL: define i16 @doubleextract2gep(
-; CHECK-NEXT: [[GEP:%[a-z0-9]+]] = getelementptr inbounds {{.*}}, {{.*}}* %arg, i64 0, i32 1, i32 1
-; CHECK-NEXT: [[LOAD:%[A-Za-z0-9]+]] = load i16, i16* [[GEP]]
+; CHECK-NEXT: [[GEP:%[a-z0-9]+]] = getelementptr inbounds {{.*}}, ptr %arg, i64 0, i32 1, i32 1
+; CHECK-NEXT: [[LOAD:%[A-Za-z0-9]+]] = load i16, ptr [[GEP]]
; CHECK-NEXT: ret i16 [[LOAD]]
-define i16 @doubleextract2gep({i16, {i32, i16}}* %arg) {
+define i16 @doubleextract2gep(ptr %arg) {
; The load + extractvalues should be converted
; to a 3-index inbounds gep + smaller load.
- %L = load {i16, {i32, i16}}, {i16, {i32, i16}}* %arg
+ %L = load {i16, {i32, i16}}, ptr %arg
%E1 = extractvalue {i16, {i32, i16}} %L, 1
%E2 = extractvalue {i32, i16} %E1, 1
ret i16 %E2
; CHECK-NEXT: extractvalue
; CHECK-NEXT: add
; CHECK-NEXT: ret
-define i32 @nogep-multiuse({i32, i32}* %pair) {
+define i32 @nogep-multiuse(ptr %pair) {
; The load should be left unchanged since both parts are needed.
- %L = load volatile {i32, i32}, {i32, i32}* %pair
+ %L = load volatile {i32, i32}, ptr %pair
%LHS = extractvalue {i32, i32} %L, 0
%RHS = extractvalue {i32, i32} %L, 1
%R = add i32 %LHS, %RHS
; CHECK-NEXT: load volatile {{.*}} %pair
; CHECK-NEXT: extractvalue
; CHECK-NEXT: ret
-define i32 @nogep-volatile({i32, i32}* %pair) {
+define i32 @nogep-volatile(ptr %pair) {
; The load volatile should be left unchanged.
- %L = load volatile {i32, i32}, {i32, i32}* %pair
+ %L = load volatile {i32, i32}, ptr %pair
%E = extractvalue {i32, i32} %L, 1
ret i32 %E
}
; CHECK-LABEL: @multi_use_fabs_fpext(
; CHECK-NEXT: [[FPEXT:%.*]] = fpext float [[X:%.*]] to double
; CHECK-NEXT: [[FABS:%.*]] = call double @llvm.fabs.f64(double [[FPEXT]])
-; CHECK-NEXT: store volatile double [[FPEXT]], double* undef, align 8
+; CHECK-NEXT: store volatile double [[FPEXT]], ptr undef, align 8
; CHECK-NEXT: ret double [[FABS]]
;
%fpext = fpext float %x to double
%fabs = call double @llvm.fabs.f64(double %fpext)
- store volatile double %fpext, double* undef
+ store volatile double %fpext, ptr undef
ret double %fabs
}
}
; Don't crash.
-define i32 @test17(double %a, double (double)* %p) {
+define i32 @test17(double %a, ptr %p) {
; CHECK-LABEL: @test17(
; CHECK-NEXT: [[CALL:%.*]] = tail call double [[P:%.*]](double [[A:%.*]])
; CHECK-NEXT: [[CMP:%.*]] = fcmp ueq double [[CALL]], 0.000000e+00
; Negative test - uses
-define i1 @is_signbit_set_extra_use(double %x, double* %p) {
+define i1 @is_signbit_set_extra_use(double %x, ptr %p) {
; CHECK-LABEL: @is_signbit_set_extra_use(
; CHECK-NEXT: [[S:%.*]] = call double @llvm.copysign.f64(double 1.000000e+00, double [[X:%.*]])
-; CHECK-NEXT: store double [[S]], double* [[P:%.*]], align 8
+; CHECK-NEXT: store double [[S]], ptr [[P:%.*]], align 8
; CHECK-NEXT: [[R:%.*]] = fcmp olt double [[S]], 0.000000e+00
; CHECK-NEXT: ret i1 [[R]]
;
%s = call double @llvm.copysign.f64(double 1.0, double %x)
- store double %s, double* %p
+ store double %s, ptr %p
%r = fcmp olt double %s, 0.0
ret i1 %r
}
ret <2 x i1> %r
}
-define i1 @lossy_one(float %x, double* %p) {
+define i1 @lossy_one(float %x, ptr %p) {
; CHECK-LABEL: @lossy_one(
; CHECK-NEXT: [[E:%.*]] = fpext float [[X:%.*]] to double
-; CHECK-NEXT: store double [[E]], double* [[P:%.*]], align 8
+; CHECK-NEXT: store double [[E]], ptr [[P:%.*]], align 8
; CHECK-NEXT: [[R:%.*]] = fcmp ord float [[X]], 0.000000e+00
; CHECK-NEXT: ret i1 [[R]]
;
%e = fpext float %x to double
- store double %e, double* %p
+ store double %e, ptr %p
%r = fcmp one double %e, 0.1
ret i1 %r
}
ret <2 x i1> %r
}
-define i1 @lossy_oge(float %x, double* %p) {
+define i1 @lossy_oge(float %x, ptr %p) {
; CHECK-LABEL: @lossy_oge(
; CHECK-NEXT: [[E:%.*]] = fpext float [[X:%.*]] to double
-; CHECK-NEXT: store double [[E]], double* [[P:%.*]], align 8
+; CHECK-NEXT: store double [[E]], ptr [[P:%.*]], align 8
; CHECK-NEXT: [[R:%.*]] = fcmp oge double [[E]], 1.000000e-01
; CHECK-NEXT: ret i1 [[R]]
;
%e = fpext float %x to double
- store double %e, double* %p
+ store double %e, ptr %p
%r = fcmp oge double %e, 0.1
ret i1 %r
}
ret <2 x i1> %r
}
-define i1 @lossy_uge(float %x, double* %p) {
+define i1 @lossy_uge(float %x, ptr %p) {
; CHECK-LABEL: @lossy_uge(
; CHECK-NEXT: [[E:%.*]] = fpext float [[X:%.*]] to double
-; CHECK-NEXT: store double [[E]], double* [[P:%.*]], align 8
+; CHECK-NEXT: store double [[E]], ptr [[P:%.*]], align 8
; CHECK-NEXT: [[R:%.*]] = fcmp uge double [[E]], 1.000000e-01
; CHECK-NEXT: ret i1 [[R]]
;
%e = fpext float %x to double
- store double %e, double* %p
+ store double %e, ptr %p
%r = fcmp uge double %e, 0.1
ret i1 %r
}
ret <2 x i1> %cmp
}
-define i1 @fneg_olt(float %a, float* %q) {
+define i1 @fneg_olt(float %a, ptr %q) {
; CHECK-LABEL: @fneg_olt(
; CHECK-NEXT: [[FNEG:%.*]] = fneg float [[A:%.*]]
-; CHECK-NEXT: store float [[FNEG]], float* [[Q:%.*]], align 4
+; CHECK-NEXT: store float [[FNEG]], ptr [[Q:%.*]], align 4
; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt float [[A]], 0.000000e+00
; CHECK-NEXT: ret i1 [[CMP]]
;
%fneg = fneg float %a
- store float %fneg, float* %q
+ store float %fneg, ptr %q
%cmp = fcmp olt float %fneg, %a
ret i1 %cmp
}
ret <2 x i1> %cmp
}
-define i1 @fneg_uge(float %a, float* %q) {
+define i1 @fneg_uge(float %a, ptr %q) {
; CHECK-LABEL: @fneg_uge(
; CHECK-NEXT: [[FNEG:%.*]] = fneg float [[A:%.*]]
-; CHECK-NEXT: store float [[FNEG]], float* [[Q:%.*]], align 4
+; CHECK-NEXT: store float [[FNEG]], ptr [[Q:%.*]], align 4
; CHECK-NEXT: [[CMP:%.*]] = fcmp ule float [[A]], 0.000000e+00
; CHECK-NEXT: ret i1 [[CMP]]
;
%fneg = fneg float %a
- store float %fneg, float* %q
+ store float %fneg, ptr %q
%cmp = fcmp uge float %fneg, %a
ret i1 %cmp
}
ret <2 x i1> %cmp
}
-define i1 @fneg_olt_swap(float %p, float* %q) {
+define i1 @fneg_olt_swap(float %p, ptr %q) {
; CHECK-LABEL: @fneg_olt_swap(
; CHECK-NEXT: [[A:%.*]] = fadd float [[P:%.*]], [[P]]
; CHECK-NEXT: [[FNEG:%.*]] = fneg float [[A]]
-; CHECK-NEXT: store float [[FNEG]], float* [[Q:%.*]], align 4
+; CHECK-NEXT: store float [[FNEG]], ptr [[Q:%.*]], align 4
; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float [[A]], 0.000000e+00
; CHECK-NEXT: ret i1 [[CMP]]
;
%a = fadd float %p, %p ; thwart complexity-based canonicalization
%fneg = fneg float %a
- store float %fneg, float* %q
+ store float %fneg, ptr %q
%cmp = fcmp olt float %a, %fneg
ret i1 %cmp
}
ret <2 x i1> %cmp
}
-define i1 @fneg_uge_swap(float %p, float* %q) {
+define i1 @fneg_uge_swap(float %p, ptr %q) {
; CHECK-LABEL: @fneg_uge_swap(
; CHECK-NEXT: [[A:%.*]] = fadd float [[P:%.*]], [[P]]
; CHECK-NEXT: [[FNEG:%.*]] = fneg float [[A]]
-; CHECK-NEXT: store float [[FNEG]], float* [[Q:%.*]], align 4
+; CHECK-NEXT: store float [[FNEG]], ptr [[Q:%.*]], align 4
; CHECK-NEXT: [[CMP:%.*]] = fcmp uge float [[A]], 0.000000e+00
; CHECK-NEXT: ret i1 [[CMP]]
;
%a = fadd float %p, %p ; thwart complexity-based canonicalization
%fneg = fneg float %a
- store float %fneg, float* %q
+ store float %fneg, ptr %q
%cmp = fcmp uge float %a, %fneg
ret i1 %cmp
}
ret double %div
}
-define double @fdiv_reassoc_cos_strict_sin_strict(double %a, i32* dereferenceable(2) %dummy) {
+define double @fdiv_reassoc_cos_strict_sin_strict(double %a, ptr dereferenceable(2) %dummy) {
; CHECK-LABEL: @fdiv_reassoc_cos_strict_sin_strict(
; CHECK-NEXT: [[TAN:%.*]] = call reassoc double @tan(double [[A:%.*]]) #1
; CHECK-NEXT: [[TMP1:%.*]] = fdiv reassoc double 1.000000e+00, [[TAN]]
ret double %div
}
-define double @fdiv_reassoc_sin_strict_cos_strict(double %a, i32* dereferenceable(2) %dummy) {
+define double @fdiv_reassoc_sin_strict_cos_strict(double %a, ptr dereferenceable(2) %dummy) {
; CHECK-LABEL: @fdiv_reassoc_sin_strict_cos_strict(
; CHECK-NEXT: [[TAN:%.*]] = call reassoc double @tan(double [[A:%.*]]) #1
; CHECK-NEXT: ret double [[TAN]]
define float @fma_fneg_const_fneg_y(float %y, float %z) {
; CHECK-LABEL: @fma_fneg_const_fneg_y(
-; CHECK-NEXT: [[FMA:%.*]] = call float @llvm.fma.f32(float [[Y:%.*]], float bitcast (i32 ptrtoint (i32* @external to i32) to float), float [[Z:%.*]])
+; CHECK-NEXT: [[FMA:%.*]] = call float @llvm.fma.f32(float [[Y:%.*]], float bitcast (i32 ptrtoint (ptr @external to i32) to float), float [[Z:%.*]])
; CHECK-NEXT: ret float [[FMA]]
;
%y.fneg = fsub float -0.0, %y
- %fsub = fsub float -0.0, bitcast (i32 ptrtoint (i32* @external to i32) to float)
+ %fsub = fsub float -0.0, bitcast (i32 ptrtoint (ptr @external to i32) to float)
%fma = call float @llvm.fma.f32(float %fsub, float %y.fneg, float %z)
ret float %fma
}
define float @fma_unary_fneg_const_unary_fneg_y(float %y, float %z) {
; CHECK-LABEL: @fma_unary_fneg_const_unary_fneg_y(
-; CHECK-NEXT: [[FMA:%.*]] = call float @llvm.fma.f32(float [[Y:%.*]], float bitcast (i32 ptrtoint (i32* @external to i32) to float), float [[Z:%.*]])
+; CHECK-NEXT: [[FMA:%.*]] = call float @llvm.fma.f32(float [[Y:%.*]], float bitcast (i32 ptrtoint (ptr @external to i32) to float), float [[Z:%.*]])
; CHECK-NEXT: ret float [[FMA]]
;
%y.fneg = fneg float %y
- %external.fneg = fneg float bitcast (i32 ptrtoint (i32* @external to i32) to float)
+ %external.fneg = fneg float bitcast (i32 ptrtoint (ptr @external to i32) to float)
%fma = call float @llvm.fma.f32(float %external.fneg, float %y.fneg, float %z)
ret float %fma
}
define float @fma_fneg_x_fneg_const(float %x, float %z) {
; CHECK-LABEL: @fma_fneg_x_fneg_const(
-; CHECK-NEXT: [[FMA:%.*]] = call float @llvm.fma.f32(float [[X:%.*]], float bitcast (i32 ptrtoint (i32* @external to i32) to float), float [[Z:%.*]])
+; CHECK-NEXT: [[FMA:%.*]] = call float @llvm.fma.f32(float [[X:%.*]], float bitcast (i32 ptrtoint (ptr @external to i32) to float), float [[Z:%.*]])
; CHECK-NEXT: ret float [[FMA]]
;
%x.fneg = fsub float -0.0, %x
- %fsub = fsub float -0.0, bitcast (i32 ptrtoint (i32* @external to i32) to float)
+ %fsub = fsub float -0.0, bitcast (i32 ptrtoint (ptr @external to i32) to float)
%fma = call float @llvm.fma.f32(float %x.fneg, float %fsub, float %z)
ret float %fma
}
define float @fma_unary_fneg_x_unary_fneg_const(float %x, float %z) {
; CHECK-LABEL: @fma_unary_fneg_x_unary_fneg_const(
-; CHECK-NEXT: [[FMA:%.*]] = call float @llvm.fma.f32(float [[X:%.*]], float bitcast (i32 ptrtoint (i32* @external to i32) to float), float [[Z:%.*]])
+; CHECK-NEXT: [[FMA:%.*]] = call float @llvm.fma.f32(float [[X:%.*]], float bitcast (i32 ptrtoint (ptr @external to i32) to float), float [[Z:%.*]])
; CHECK-NEXT: ret float [[FMA]]
;
%x.fneg = fneg float %x
- %external.fneg = fneg float bitcast (i32 ptrtoint (i32* @external to i32) to float)
+ %external.fneg = fneg float bitcast (i32 ptrtoint (ptr @external to i32) to float)
%fma = call float @llvm.fma.f32(float %x.fneg, float %external.fneg, float %z)
ret float %fma
}
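; In the four fma tests above the two negations cancel:
;   call @llvm.fma.f32(fneg %a, fneg %b, %c) -> call @llvm.fma.f32(%a, %b, %c)
; (operands may be commuted), whether the negation is written as a unary fneg
; or as the legacy "fsub -0.0, %x" idiom.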
define float @fmuladd_fneg_const_fneg_y(float %y, float %z) {
; CHECK-LABEL: @fmuladd_fneg_const_fneg_y(
-; CHECK-NEXT: [[FMULADD:%.*]] = call float @llvm.fmuladd.f32(float [[Y:%.*]], float bitcast (i32 ptrtoint (i32* @external to i32) to float), float [[Z:%.*]])
+; CHECK-NEXT: [[FMULADD:%.*]] = call float @llvm.fmuladd.f32(float [[Y:%.*]], float bitcast (i32 ptrtoint (ptr @external to i32) to float), float [[Z:%.*]])
; CHECK-NEXT: ret float [[FMULADD]]
;
%y.fneg = fsub float -0.0, %y
- %fsub = fsub float -0.0, bitcast (i32 ptrtoint (i32* @external to i32) to float)
+ %fsub = fsub float -0.0, bitcast (i32 ptrtoint (ptr @external to i32) to float)
%fmuladd = call float @llvm.fmuladd.f32(float %fsub, float %y.fneg, float %z)
ret float %fmuladd
}
define float @fmuladd_unary_fneg_const_unary_fneg_y(float %y, float %z) {
; CHECK-LABEL: @fmuladd_unary_fneg_const_unary_fneg_y(
-; CHECK-NEXT: [[FMULADD:%.*]] = call float @llvm.fmuladd.f32(float [[Y:%.*]], float bitcast (i32 ptrtoint (i32* @external to i32) to float), float [[Z:%.*]])
+; CHECK-NEXT: [[FMULADD:%.*]] = call float @llvm.fmuladd.f32(float [[Y:%.*]], float bitcast (i32 ptrtoint (ptr @external to i32) to float), float [[Z:%.*]])
; CHECK-NEXT: ret float [[FMULADD]]
;
%y.fneg = fneg float %y
- %external.fneg = fneg float bitcast (i32 ptrtoint (i32* @external to i32) to float)
+ %external.fneg = fneg float bitcast (i32 ptrtoint (ptr @external to i32) to float)
%fmuladd = call float @llvm.fmuladd.f32(float %external.fneg, float %y.fneg, float %z)
ret float %fmuladd
}
define float @fmuladd_fneg_x_fneg_const(float %x, float %z) {
; CHECK-LABEL: @fmuladd_fneg_x_fneg_const(
-; CHECK-NEXT: [[FMULADD:%.*]] = call float @llvm.fmuladd.f32(float [[X:%.*]], float bitcast (i32 ptrtoint (i32* @external to i32) to float), float [[Z:%.*]])
+; CHECK-NEXT: [[FMULADD:%.*]] = call float @llvm.fmuladd.f32(float [[X:%.*]], float bitcast (i32 ptrtoint (ptr @external to i32) to float), float [[Z:%.*]])
; CHECK-NEXT: ret float [[FMULADD]]
;
%x.fneg = fsub float -0.0, %x
- %fsub = fsub float -0.0, bitcast (i32 ptrtoint (i32* @external to i32) to float)
+ %fsub = fsub float -0.0, bitcast (i32 ptrtoint (ptr @external to i32) to float)
%fmuladd = call float @llvm.fmuladd.f32(float %x.fneg, float %fsub, float %z)
ret float %fmuladd
}
define float @fmuladd_unary_fneg_x_unary_fneg_const(float %x, float %z) {
; CHECK-LABEL: @fmuladd_unary_fneg_x_unary_fneg_const(
-; CHECK-NEXT: [[FMULADD:%.*]] = call float @llvm.fmuladd.f32(float [[X:%.*]], float bitcast (i32 ptrtoint (i32* @external to i32) to float), float [[Z:%.*]])
+; CHECK-NEXT: [[FMULADD:%.*]] = call float @llvm.fmuladd.f32(float [[X:%.*]], float bitcast (i32 ptrtoint (ptr @external to i32) to float), float [[Z:%.*]])
; CHECK-NEXT: ret float [[FMULADD]]
;
%x.fneg = fneg float %x
- %external.fneg = fneg float bitcast (i32 ptrtoint (i32* @external to i32) to float)
+ %external.fneg = fneg float bitcast (i32 ptrtoint (ptr @external to i32) to float)
%fmuladd = call float @llvm.fmuladd.f32(float %x.fneg, float %external.fneg, float %z)
ret float %fmuladd
}
; Don't crash when attempting to cast a constant FMul (here with a poison
; lane in the vector) to an instruction.
-define void @test8(i32* %inout, i1 %c1) {
+define void @test8(ptr %inout, i1 %c1) {
; CHECK-LABEL: @test8(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK-NEXT: ret void
;
entry:
- %0 = load i32, i32* %inout, align 4
+ %0 = load i32, ptr %inout, align 4
%conv = uitofp i32 %0 to float
%vecinit = insertelement <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float poison>, float %conv, i32 3
%sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %vecinit
ret double %squared
}
-define double @rsqrt_x_reassociate_extra_use(double %x, double * %p) {
+define double @rsqrt_x_reassociate_extra_use(double %x, ptr %p) {
; CHECK-LABEL: @rsqrt_x_reassociate_extra_use(
; CHECK-NEXT: [[SQRT:%.*]] = call double @llvm.sqrt.f64(double [[X:%.*]])
; CHECK-NEXT: [[RSQRT:%.*]] = fdiv double 1.000000e+00, [[SQRT]]
; CHECK-NEXT: [[RES:%.*]] = fdiv reassoc nsz double [[X]], [[SQRT]]
-; CHECK-NEXT: store double [[RSQRT]], double* [[P:%.*]], align 8
+; CHECK-NEXT: store double [[RSQRT]], ptr [[P:%.*]], align 8
; CHECK-NEXT: ret double [[RES]]
;
%sqrt = call double @llvm.sqrt.f64(double %x)
%rsqrt = fdiv double 1.0, %sqrt
%res = fmul reassoc nsz double %rsqrt, %x
- store double %rsqrt, double* %p
+ store double %rsqrt, ptr %p
ret double %res
}
-define <2 x float> @x_add_y_rsqrt_reassociate_extra_use(<2 x float> %x, <2 x float> %y, <2 x float>* %p) {
+define <2 x float> @x_add_y_rsqrt_reassociate_extra_use(<2 x float> %x, <2 x float> %y, ptr %p) {
; CHECK-LABEL: @x_add_y_rsqrt_reassociate_extra_use(
; CHECK-NEXT: [[ADD:%.*]] = fadd fast <2 x float> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[SQRT:%.*]] = call fast <2 x float> @llvm.sqrt.v2f32(<2 x float> [[ADD]])
; CHECK-NEXT: [[RSQRT:%.*]] = fdiv fast <2 x float> <float 1.000000e+00, float 1.000000e+00>, [[SQRT]]
; CHECK-NEXT: [[RES:%.*]] = fdiv fast <2 x float> [[ADD]], [[SQRT]]
-; CHECK-NEXT: store <2 x float> [[RSQRT]], <2 x float>* [[P:%.*]], align 8
+; CHECK-NEXT: store <2 x float> [[RSQRT]], ptr [[P:%.*]], align 8
; CHECK-NEXT: ret <2 x float> [[RES]]
;
%add = fadd fast <2 x float> %x, %y ; thwart complexity-based canonicalization
%sqrt = call fast <2 x float> @llvm.sqrt.v2f32(<2 x float> %add)
%rsqrt = fdiv fast <2 x float> <float 1.0, float 1.0>, %sqrt
%res = fmul fast <2 x float> %add, %rsqrt
- store <2 x float> %rsqrt, <2 x float>* %p
+ store <2 x float> %rsqrt, ptr %p
ret <2 x float> %res
}
}
; Don't crash when attempting to cast a constant FMul to an instruction.
-define void @test8(i32* %inout, i1 %c1) {
+define void @test8(ptr %inout, i1 %c1) {
; CHECK-LABEL: @test8(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK-NEXT: ret void
;
entry:
- %0 = load i32, i32* %inout, align 4
+ %0 = load i32, ptr %inout, align 4
%conv = uitofp i32 %0 to float
%vecinit = insertelement <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float undef>, float %conv, i32 3
%sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %vecinit
define float @fdiv_constant_numerator_fmul_extra_use(float %x) {
; CHECK-LABEL: @fdiv_constant_numerator_fmul_extra_use(
; CHECK-NEXT: [[DIV:%.*]] = fdiv fast float 1.000000e+00, [[X:%.*]]
-; CHECK-NEXT: store float [[DIV]], float* @fmul2_external, align 4
+; CHECK-NEXT: store float [[DIV]], ptr @fmul2_external, align 4
; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[DIV]], 2.000000e+00
; CHECK-NEXT: ret float [[MUL]]
;
%div = fdiv fast float 1.0, %x
- store float %div, float* @fmul2_external
+ store float %div, ptr @fmul2_external
%mul = fmul fast float %div, 2.0
ret float %mul
}
; Avoid infinite looping by moving negation out of a constant expression.
-@g = external global {[2 x i8*]}, align 1
+@g = external global {[2 x ptr]}, align 1
define double @fmul_negated_constant_expression(double %x) {
; CHECK-LABEL: @fmul_negated_constant_expression(
-; CHECK-NEXT: [[FSUB:%.*]] = fneg double bitcast (i64 ptrtoint (i8** getelementptr inbounds ({ [2 x i8*] }, { [2 x i8*] }* @g, i64 0, inrange i32 0, i64 2) to i64) to double)
+; CHECK-NEXT: [[FSUB:%.*]] = fneg double bitcast (i64 ptrtoint (ptr getelementptr inbounds ({ [2 x ptr] }, ptr @g, i64 0, inrange i32 0, i64 2) to i64) to double)
; CHECK-NEXT: [[R:%.*]] = fmul double [[FSUB]], [[X:%.*]]
; CHECK-NEXT: ret double [[R]]
;
- %fsub = fsub double -0.000000e+00, bitcast (i64 ptrtoint (i8** getelementptr inbounds ({ [2 x i8*] }, { [2 x i8*] }* @g, i64 0, inrange i32 0, i64 2) to i64) to double)
+ %fsub = fsub double -0.000000e+00, bitcast (i64 ptrtoint (ptr getelementptr inbounds ({ [2 x ptr] }, ptr @g, i64 0, inrange i32 0, i64 2) to i64) to double)
%r = fmul double %x, %fsub
ret double %r
}
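; Sketch of the cycle being avoided: folding the fsub into the constant
; expression would produce a negated constant expression that another fold
; would expand right back, looping forever; materializing the negation as the
; fneg instruction shown in the CHECK lines above breaks that cycle.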
define float @fneg_nsz_fadd_constant_expr(float %x) {
; CHECK-LABEL: @fneg_nsz_fadd_constant_expr(
-; CHECK-NEXT: [[A:%.*]] = fadd float [[X:%.*]], bitcast (i32 ptrtoint (i16* @g to i32) to float)
+; CHECK-NEXT: [[A:%.*]] = fadd float [[X:%.*]], bitcast (i32 ptrtoint (ptr @g to i32) to float)
; CHECK-NEXT: [[R:%.*]] = fneg nsz float [[A]]
; CHECK-NEXT: ret float [[R]]
;
- %a = fadd float %x, bitcast (i32 ptrtoint (i16* @g to i32) to float)
+ %a = fadd float %x, bitcast (i32 ptrtoint (ptr @g to i32) to float)
%r = fneg nsz float %a
ret float %r
}
define float @fake_fneg_nsz_fadd_constant_expr(float %x) {
; CHECK-LABEL: @fake_fneg_nsz_fadd_constant_expr(
-; CHECK-NEXT: [[A:%.*]] = fadd float [[X:%.*]], bitcast (i32 ptrtoint (i16* @g to i32) to float)
+; CHECK-NEXT: [[A:%.*]] = fadd float [[X:%.*]], bitcast (i32 ptrtoint (ptr @g to i32) to float)
; CHECK-NEXT: [[R:%.*]] = fneg nsz float [[A]]
; CHECK-NEXT: ret float [[R]]
;
- %a = fadd float %x, bitcast (i32 ptrtoint (i16* @g to i32) to float)
+ %a = fadd float %x, bitcast (i32 ptrtoint (ptr @g to i32) to float)
%r = fsub nsz float -0.0, %a
ret float %r
}
; CHECK-LABEL: @f(
; CHECK-NEXT: ret i1 false
;
- %b = and i1 %x, icmp eq (i8* inttoptr (i32 1 to i8*), i8* inttoptr (i32 2 to i8*))
+ %b = and i1 %x, icmp eq (ptr inttoptr (i32 1 to ptr), ptr inttoptr (i32 2 to ptr))
ret i1 %b
}
; CHECK-LABEL: @f_logical(
; CHECK-NEXT: ret i1 false
;
- %b = select i1 %x, i1 icmp eq (i8* inttoptr (i32 1 to i8*), i8* inttoptr (i32 2 to i8*)), i1 false
+ %b = select i1 %x, i1 icmp eq (ptr inttoptr (i32 1 to ptr), ptr inttoptr (i32 2 to ptr)), i1 false
ret i1 %b
}
; CHECK-LABEL: @g(
; CHECK-NEXT: ret i32 [[X:%.*]]
;
- %b = add i32 %x, zext (i1 icmp eq (i8* inttoptr (i32 1000000 to i8*), i8* inttoptr (i32 2000000 to i8*)) to i32)
+ %b = add i32 %x, zext (i1 icmp eq (ptr inttoptr (i32 1000000 to ptr), ptr inttoptr (i32 2000000 to ptr)) to i32)
ret i32 %b
}
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
-@g1 = common global i32* null, align 8
+@g1 = common global ptr null, align 8
%struct.S1 = type { i32, float }
%struct.S2 = type { float, i32 }
; Check that instcombine preserves metadata when it merges two loads.
;
; CHECK: return:
-; CHECK: load i32*, i32** %{{[a-z0-9.]+}}, align 8, !nonnull ![[EMPTYNODE:[0-9]+]]
-; CHECK: load i32, i32* %{{[a-z0-9.]+}}, align 4, !tbaa ![[TBAA:[0-9]+]], !range ![[RANGE:[0-9]+]], !invariant.load ![[EMPTYNODE:[0-9]+]], !alias.scope ![[ALIAS_SCOPE:[0-9]+]], !noalias ![[NOALIAS:[0-9]+]]
+; CHECK: load ptr, ptr %{{[a-z0-9.]+}}, align 8, !nonnull ![[EMPTYNODE:[0-9]+]]
+; CHECK: load i32, ptr %{{[a-z0-9.]+}}, align 4, !tbaa ![[TBAA:[0-9]+]], !range ![[RANGE:[0-9]+]], !invariant.load ![[EMPTYNODE:[0-9]+]], !alias.scope ![[ALIAS_SCOPE:[0-9]+]], !noalias ![[NOALIAS:[0-9]+]]
; Function Attrs: nounwind ssp uwtable
-define i32 @phi_load_metadata(%struct.S1* %s1, %struct.S2* %s2, i32 %c, i32** %x0, i32 **%x1) #0 {
+define i32 @phi_load_metadata(ptr %s1, ptr %s2, i32 %c, ptr %x0, ptr %x1) #0 {
entry:
%tobool = icmp eq i32 %c, 0
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
- %i = getelementptr inbounds %struct.S2, %struct.S2* %s2, i64 0, i32 1
- %val = load i32, i32* %i, align 4, !tbaa !0, !alias.scope !13, !noalias !14, !invariant.load !17, !range !18
- %p0 = load i32*, i32** %x0, align 8, !nonnull !17
+ %i = getelementptr inbounds %struct.S2, ptr %s2, i64 0, i32 1
+ %val = load i32, ptr %i, align 4, !tbaa !0, !alias.scope !13, !noalias !14, !invariant.load !17, !range !18
+ %p0 = load ptr, ptr %x0, align 8, !nonnull !17
br label %return
if.end: ; preds = %entry
- %i2 = getelementptr inbounds %struct.S1, %struct.S1* %s1, i64 0, i32 0
- %val2 = load i32, i32* %i2, align 4, !tbaa !2, !alias.scope !15, !noalias !16, !invariant.load !17, !range !19
- %p1 = load i32*, i32** %x1, align 8, !nonnull !17
+ %val2 = load i32, ptr %s1, align 4, !tbaa !2, !alias.scope !15, !noalias !16, !invariant.load !17, !range !19
+ %p1 = load ptr, ptr %x1, align 8, !nonnull !17
br label %return
return: ; preds = %if.end, %if.then
%retval = phi i32 [ %val, %if.then ], [ %val2, %if.end ]
- %pval = phi i32* [ %p0, %if.then ], [ %p1, %if.end ]
- store i32* %pval, i32** @g1, align 8
+ %pval = phi ptr [ %p0, %if.then ], [ %p1, %if.end ]
+ store ptr %pval, ptr @g1, align 8
ret i32 %retval
}
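; When the two loads feeding each phi are combined, only metadata valid for
; both survives: !tbaa is generalized across !0/!2, !range is widened to cover
; both !18 and !19, and !alias.scope, !noalias, !invariant.load and !nonnull
; are kept because both sides carry them.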
; CHECK-NOT: select
-define void @foo(<4 x i32> *%A, <4 x i32> *%B, <4 x i32> *%C, <4 x i32> *%D,
- <4 x i32> *%E, <4 x i32> *%F, <4 x i32> *%G, <4 x i32> *%H,
- <4 x i32> *%I, <4 x i32> *%J, <4 x i32> *%K, <4 x i32> *%L,
- <4 x i32> *%M, <4 x i32> *%N, <4 x i32> *%O, <4 x i32> *%P,
- <4 x i32> *%Q, <4 x i32> *%R, <4 x i32> *%S, <4 x i32> *%T,
- <4 x i32> *%U, <4 x i32> *%V, <4 x i32> *%W, <4 x i32> *%X,
- <4 x i32> *%Y, <4 x i32> *%Z, <4 x i32> *%BA, <4 x i32> *%BB,
- <4 x i32> *%BC, <4 x i32> *%BD, <4 x i32> *%BE, <4 x i32> *%BF,
- <4 x i32> *%BG, <4 x i32> *%BH, <4 x i32> *%BI, <4 x i32> *%BJ,
- <4 x i32> *%BK, <4 x i32> *%BL, <4 x i32> *%BM, <4 x i32> *%BN,
- <4 x i32> *%BO, <4 x i32> *%BP, <4 x i32> *%BQ, <4 x i32> *%BR,
- <4 x i32> *%BS, <4 x i32> *%BT, <4 x i32> *%BU, <4 x i32> *%BV,
- <4 x i32> *%BW, <4 x i32> *%BX, <4 x i32> *%BY, <4 x i32> *%BZ,
- <4 x i32> *%CA, <4 x i32> *%CB, <4 x i32> *%CC, <4 x i32> *%CD,
- <4 x i32> *%CE, <4 x i32> *%CF, <4 x i32> *%CG, <4 x i32> *%CH,
- <4 x i32> *%CI, <4 x i32> *%CJ, <4 x i32> *%CK, <4 x i32> *%CL) {
+define void @foo(ptr %A, ptr %B, ptr %C, ptr %D,
+ ptr %E, ptr %F, ptr %G, ptr %H,
+ ptr %I, ptr %J, ptr %K, ptr %L,
+ ptr %M, ptr %N, ptr %O, ptr %P,
+ ptr %Q, ptr %R, ptr %S, ptr %T,
+ ptr %U, ptr %V, ptr %W, ptr %X,
+ ptr %Y, ptr %Z, ptr %BA, ptr %BB,
+ ptr %BC, ptr %BD, ptr %BE, ptr %BF,
+ ptr %BG, ptr %BH, ptr %BI, ptr %BJ,
+ ptr %BK, ptr %BL, ptr %BM, ptr %BN,
+ ptr %BO, ptr %BP, ptr %BQ, ptr %BR,
+ ptr %BS, ptr %BT, ptr %BU, ptr %BV,
+ ptr %BW, ptr %BX, ptr %BY, ptr %BZ,
+ ptr %CA, ptr %CB, ptr %CC, ptr %CD,
+ ptr %CE, ptr %CF, ptr %CG, ptr %CH,
+ ptr %CI, ptr %CJ, ptr %CK, ptr %CL) {
%a = select <4 x i1> <i1 false, i1 false, i1 false, i1 false>, <4 x i32> zeroinitializer, <4 x i32> <i32 9, i32 87, i32 57, i32 8>
%b = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> zeroinitializer, <4 x i32> <i32 44, i32 99, i32 49, i32 29>
%c = select <4 x i1> <i1 false, i1 true, i1 false, i1 false>, <4 x i32> zeroinitializer, <4 x i32> <i32 15, i32 18, i32 53, i32 84>
%cj = select <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
%ck = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
%cl = select <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
- store <4 x i32> %a, <4 x i32>* %A
- store <4 x i32> %b, <4 x i32>* %B
- store <4 x i32> %c, <4 x i32>* %C
- store <4 x i32> %d, <4 x i32>* %D
- store <4 x i32> %e, <4 x i32>* %E
- store <4 x i32> %f, <4 x i32>* %F
- store <4 x i32> %g, <4 x i32>* %G
- store <4 x i32> %h, <4 x i32>* %H
- store <4 x i32> %i, <4 x i32>* %I
- store <4 x i32> %j, <4 x i32>* %J
- store <4 x i32> %k, <4 x i32>* %K
- store <4 x i32> %l, <4 x i32>* %L
- store <4 x i32> %m, <4 x i32>* %M
- store <4 x i32> %n, <4 x i32>* %N
- store <4 x i32> %o, <4 x i32>* %O
- store <4 x i32> %p, <4 x i32>* %P
- store <4 x i32> %q, <4 x i32>* %Q
- store <4 x i32> %r, <4 x i32>* %R
- store <4 x i32> %s, <4 x i32>* %S
- store <4 x i32> %t, <4 x i32>* %T
- store <4 x i32> %u, <4 x i32>* %U
- store <4 x i32> %v, <4 x i32>* %V
- store <4 x i32> %w, <4 x i32>* %W
- store <4 x i32> %x, <4 x i32>* %X
- store <4 x i32> %y, <4 x i32>* %Y
- store <4 x i32> %z, <4 x i32>* %Z
- store <4 x i32> %ba, <4 x i32>* %BA
- store <4 x i32> %bb, <4 x i32>* %BB
- store <4 x i32> %bc, <4 x i32>* %BC
- store <4 x i32> %bd, <4 x i32>* %BD
- store <4 x i32> %be, <4 x i32>* %BE
- store <4 x i32> %bf, <4 x i32>* %BF
- store <4 x i32> %bg, <4 x i32>* %BG
- store <4 x i32> %bh, <4 x i32>* %BH
- store <4 x i32> %bi, <4 x i32>* %BI
- store <4 x i32> %bj, <4 x i32>* %BJ
- store <4 x i32> %bk, <4 x i32>* %BK
- store <4 x i32> %bl, <4 x i32>* %BL
- store <4 x i32> %bm, <4 x i32>* %BM
- store <4 x i32> %bn, <4 x i32>* %BN
- store <4 x i32> %bo, <4 x i32>* %BO
- store <4 x i32> %bp, <4 x i32>* %BP
- store <4 x i32> %bq, <4 x i32>* %BQ
- store <4 x i32> %br, <4 x i32>* %BR
- store <4 x i32> %bs, <4 x i32>* %BS
- store <4 x i32> %bt, <4 x i32>* %BT
- store <4 x i32> %bu, <4 x i32>* %BU
- store <4 x i32> %bv, <4 x i32>* %BV
- store <4 x i32> %bw, <4 x i32>* %BW
- store <4 x i32> %bx, <4 x i32>* %BX
- store <4 x i32> %by, <4 x i32>* %BY
- store <4 x i32> %bz, <4 x i32>* %BZ
- store <4 x i32> %ca, <4 x i32>* %CA
- store <4 x i32> %cb, <4 x i32>* %CB
- store <4 x i32> %cc, <4 x i32>* %CC
- store <4 x i32> %cd, <4 x i32>* %CD
- store <4 x i32> %ce, <4 x i32>* %CE
- store <4 x i32> %cf, <4 x i32>* %CF
- store <4 x i32> %cg, <4 x i32>* %CG
- store <4 x i32> %ch, <4 x i32>* %CH
- store <4 x i32> %ci, <4 x i32>* %CI
- store <4 x i32> %cj, <4 x i32>* %CJ
- store <4 x i32> %ck, <4 x i32>* %CK
- store <4 x i32> %cl, <4 x i32>* %CL
+ store <4 x i32> %a, ptr %A
+ store <4 x i32> %b, ptr %B
+ store <4 x i32> %c, ptr %C
+ store <4 x i32> %d, ptr %D
+ store <4 x i32> %e, ptr %E
+ store <4 x i32> %f, ptr %F
+ store <4 x i32> %g, ptr %G
+ store <4 x i32> %h, ptr %H
+ store <4 x i32> %i, ptr %I
+ store <4 x i32> %j, ptr %J
+ store <4 x i32> %k, ptr %K
+ store <4 x i32> %l, ptr %L
+ store <4 x i32> %m, ptr %M
+ store <4 x i32> %n, ptr %N
+ store <4 x i32> %o, ptr %O
+ store <4 x i32> %p, ptr %P
+ store <4 x i32> %q, ptr %Q
+ store <4 x i32> %r, ptr %R
+ store <4 x i32> %s, ptr %S
+ store <4 x i32> %t, ptr %T
+ store <4 x i32> %u, ptr %U
+ store <4 x i32> %v, ptr %V
+ store <4 x i32> %w, ptr %W
+ store <4 x i32> %x, ptr %X
+ store <4 x i32> %y, ptr %Y
+ store <4 x i32> %z, ptr %Z
+ store <4 x i32> %ba, ptr %BA
+ store <4 x i32> %bb, ptr %BB
+ store <4 x i32> %bc, ptr %BC
+ store <4 x i32> %bd, ptr %BD
+ store <4 x i32> %be, ptr %BE
+ store <4 x i32> %bf, ptr %BF
+ store <4 x i32> %bg, ptr %BG
+ store <4 x i32> %bh, ptr %BH
+ store <4 x i32> %bi, ptr %BI
+ store <4 x i32> %bj, ptr %BJ
+ store <4 x i32> %bk, ptr %BK
+ store <4 x i32> %bl, ptr %BL
+ store <4 x i32> %bm, ptr %BM
+ store <4 x i32> %bn, ptr %BN
+ store <4 x i32> %bo, ptr %BO
+ store <4 x i32> %bp, ptr %BP
+ store <4 x i32> %bq, ptr %BQ
+ store <4 x i32> %br, ptr %BR
+ store <4 x i32> %bs, ptr %BS
+ store <4 x i32> %bt, ptr %BT
+ store <4 x i32> %bu, ptr %BU
+ store <4 x i32> %bv, ptr %BV
+ store <4 x i32> %bw, ptr %BW
+ store <4 x i32> %bx, ptr %BX
+ store <4 x i32> %by, ptr %BY
+ store <4 x i32> %bz, ptr %BZ
+ store <4 x i32> %ca, ptr %CA
+ store <4 x i32> %cb, ptr %CB
+ store <4 x i32> %cc, ptr %CC
+ store <4 x i32> %cd, ptr %CD
+ store <4 x i32> %ce, ptr %CE
+ store <4 x i32> %cf, ptr %CF
+ store <4 x i32> %cg, ptr %CG
+ store <4 x i32> %ch, ptr %CH
+ store <4 x i32> %ci, ptr %CI
+ store <4 x i32> %cj, ptr %CJ
+ store <4 x i32> %ck, ptr %CK
+ store <4 x i32> %cl, ptr %CL
ret void
}
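; Every select above has a constant condition vector, so each folds to a
; constant chosen lane by lane; e.g. for %b:
;   select <4 x i1> <i1 true, i1 false, i1 false, i1 false>,
;          <4 x i32> zeroinitializer, <4 x i32> <i32 44, i32 99, i32 49, i32 29>
;   ==> <4 x i32> <i32 0, i32 99, i32 49, i32 29>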
%s0 = phi i64 [ 0, %bb8 ], [ %r21, %bb30 ]
%l0 = phi i64 [ -2222, %bb8 ], [ %r23, %bb30 ]
%r2 = add i64 %s0, %B
- %r3 = inttoptr i64 %r2 to <2 x double>*
- %r4 = load <2 x double>, <2 x double>* %r3, align 8
+ %r3 = inttoptr i64 %r2 to ptr
+ %r4 = load <2 x double>, ptr %r3, align 8
%r6 = bitcast <2 x double> %r4 to <2 x i64>
%r7 = bitcast <2 x double> zeroinitializer to <2 x i64>
%r8 = insertelement <2 x i64> poison, i64 9223372036854775807, i32 0
%r14 = or <2 x i64> %r12, %r13
%r15 = bitcast <2 x i64> %r14 to <2 x double>
%r18 = add i64 %s0, %A
- %r19 = inttoptr i64 %r18 to <2 x double>*
- store <2 x double> %r15, <2 x double>* %r19, align 8
+ %r19 = inttoptr i64 %r18 to ptr
+ store <2 x double> %r15, ptr %r19, align 8
%r21 = add i64 16, %s0
%r23 = add i64 1, %l0
%r25 = icmp slt i64 %r23, 0
%s0 = phi i64 [ 0, %bb8 ], [ %r21, %bb30 ]
%l0 = phi i64 [ -2222, %bb8 ], [ %r23, %bb30 ]
%r2 = add i64 %s0, %B
- %r3 = inttoptr i64 %r2 to <2 x double>*
- %r4 = load <2 x double>, <2 x double>* %r3, align 8
+ %r3 = inttoptr i64 %r2 to ptr
+ %r4 = load <2 x double>, ptr %r3, align 8
%r6 = bitcast <2 x double> %r4 to <2 x i64>
%r7 = bitcast <2 x double> zeroinitializer to <2 x i64>
%r8 = insertelement <2 x i64> undef, i64 9223372036854775807, i32 0
%r14 = or <2 x i64> %r12, %r13
%r15 = bitcast <2 x i64> %r14 to <2 x double>
%r18 = add i64 %s0, %A
- %r19 = inttoptr i64 %r18 to <2 x double>*
- store <2 x double> %r15, <2 x double>* %r19, align 8
+ %r19 = inttoptr i64 %r18 to ptr
+ store <2 x double> %r15, ptr %r19, align 8
%r21 = add i64 16, %s0
%r23 = add i64 1, %l0
%r25 = icmp slt i64 %r23, 0
; CHECK-LABEL: @gep_constexpr_inttoptr(
; CHECK-NEXT: ret ptr getelementptr (i8, ptr inttoptr (i64 mul (i64 ptrtoint (ptr @g to i64), i64 2) to ptr), i64 20)
;
- ret ptr getelementptr([16 x i16], ptr inttoptr (i64 mul (i64 ptrtoint ([16 x i16]* @g to i64), i64 2) to ptr), i64 0, i64 10)
+ ret ptr getelementptr([16 x i16], ptr inttoptr (i64 mul (i64 ptrtoint (ptr @g to i64), i64 2) to ptr), i64 0, i64 10)
}
; RUN: opt < %s -passes='require<profile-summary>,function(instcombine)' -pgso -S | FileCheck %s -check-prefix=PGSO
; RUN: opt < %s -passes='require<profile-summary>,function(instcombine)' -pgso=false -S | FileCheck %s -check-prefix=NPGSO
-%struct._IO_FILE = type { i32, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, %struct._IO_marker*, %struct._IO_FILE*, i32, i32, i32, i16, i8, [1 x i8], i8*, i64, i8*, i8*, i8*, i8*, i32, i32, [40 x i8] }
-%struct._IO_marker = type { %struct._IO_marker*, %struct._IO_FILE*, i32 }
+%struct._IO_FILE = type { i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32, i32, i32, i16, i8, [1 x i8], ptr, i64, ptr, ptr, ptr, ptr, i32, i32, [40 x i8] }
+%struct._IO_marker = type { ptr, ptr, i32 }
@.str = private unnamed_addr constant [10 x i8] c"mylog.txt\00", align 1
@.str.1 = private unnamed_addr constant [2 x i8] c"a\00", align 1
; CHECK-NOT: call i64 @fwrite
; CHECK: call i32 @fputs
- %call = tail call %struct._IO_FILE* @fopen(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.1, i32 0, i32 0)) #2
- %call1 = tail call i32 @fputs(i8* getelementptr inbounds ([27 x i8], [27 x i8]* @.str.2, i32 0, i32 0), %struct._IO_FILE* %call) #2
+ %call = tail call ptr @fopen(ptr @.str, ptr @.str.1) #2
+ %call1 = tail call i32 @fputs(ptr @.str.2, ptr %call) #2
ret i32 0
}
-declare noalias %struct._IO_FILE* @fopen(i8* nocapture readonly, i8* nocapture readonly) local_unnamed_addr #1
-declare i32 @fputs(i8* nocapture readonly, %struct._IO_FILE* nocapture) local_unnamed_addr #1
+declare noalias ptr @fopen(ptr nocapture readonly, ptr nocapture readonly) local_unnamed_addr #1
+declare i32 @fputs(ptr nocapture readonly, ptr nocapture) local_unnamed_addr #1
attributes #0 = { nounwind optsize }
attributes #1 = { nounwind optsize }
; NPGSO: call i64 @fwrite
; NPGSO-NOT: call i32 @fputs
- %call = tail call %struct._IO_FILE* @fopen(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.1, i32 0, i32 0)) #2
- %call1 = tail call i32 @fputs(i8* getelementptr inbounds ([27 x i8], [27 x i8]* @.str.2, i32 0, i32 0), %struct._IO_FILE* %call) #2
+ %call = tail call ptr @fopen(ptr @.str, ptr @.str.1) #2
+ %call1 = tail call i32 @fputs(ptr @.str.2, ptr %call) #2
ret i32 0
}
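; The fold under test rewrites fputs of a string with known length to fwrite,
; which skips the implicit strlen; it is suppressed when optimizing for size
; (the PGSO run) because fwrite takes two extra arguments. A sketch, assuming
; @.str.2 holds a 26-character message ([27 x i8] including the NUL):
;   %r = tail call i32 @fputs(ptr @.str.2, ptr %f)
;   ==> %r2 = call i64 @fwrite(ptr @.str.2, i64 26, i64 1, ptr %f)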
; Check that no freeze instruction gets inserted before a landingpad: the
; landingpad must remain the first non-PHI instruction in its basic block.
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-define i32 @propagate_freeze_in_landingpad() personality i32* ()* null {
+define i32 @propagate_freeze_in_landingpad() personality ptr null {
; CHECK-LABEL: @propagate_freeze_in_landingpad(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[INVOKE_BB1:%.*]]
; CHECK-NEXT: br label [[INVOKE_BB1]]
; CHECK: exceptional_return:
; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ [[X]], [[INVOKE_BB1]] ], [ 0, [[INVOKE_BB2]] ]
-; CHECK-NEXT: [[LANDING_PAD:%.*]] = landingpad { i8*, i32 }
+; CHECK-NEXT: [[LANDING_PAD:%.*]] = landingpad { ptr, i32 }
; CHECK-NEXT: cleanup
; CHECK-NEXT: [[FR:%.*]] = freeze i32 [[PHI]]
; CHECK-NEXT: [[RES:%.*]] = shl i32 [[FR]], 1
exceptional_return: ; preds = %invoke.bb2, %invoke.bb1
%phi = phi i32 [ %x, %invoke.bb1 ], [ 0, %invoke.bb2 ]
- %landing_pad = landingpad { i8*, i32 }
+ %landing_pad = landingpad { ptr, i32 }
cleanup
%fr = freeze i32 %phi
%res = add i32 %fr, %phi
; CHECK-NEXT: i8 1, label [[C:%.*]]
; CHECK-NEXT: ]
; CHECK: A:
-; CHECK-NEXT: [[PHI_FR:%.*]] = freeze i32 ptrtoint (i8* getelementptr inbounds (i8, i8* @glb, i64 2) to i32)
+; CHECK-NEXT: [[PHI_FR:%.*]] = freeze i32 ptrtoint (ptr getelementptr inbounds (i8, ptr @glb, i64 2) to i32)
; CHECK-NEXT: br label [[D:%.*]]
; CHECK: B:
; CHECK-NEXT: br label [[D]]
C:
br label %D
D:
- %y = phi i32 [ptrtoint (i8* getelementptr inbounds (i8, i8* @glb, i64 2) to i32), %A], [32, %B], [0, %C]
+ %y = phi i32 [ptrtoint (ptr getelementptr inbounds (i8, ptr @glb, i64 2) to i32), %A], [32, %B], [0, %C]
%y.fr = freeze i32 %y
ret i32 %y.fr
}
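; freeze of a phi is pushed into the incoming values: plain constants like 32
; and 0 freeze to themselves, while the ptrtoint constant expression gets an
; explicit freeze in its predecessor block, as the CHECK lines above show:
;   %y.fr = freeze(phi [C, %A], [32, %B], [0, %C])
;   ==> phi [freeze(C), %A], [32, %B], [0, %C]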
}
declare void @use_i32(i32)
-declare void @use_p32(i32*)
+declare void @use_p32(ptr)
define i32 @and_freeze_undef_multipleuses(i32 %x) {
; CHECK-LABEL: @and_freeze_undef_multipleuses(
ret i32 %v3.fr
}
-define i1 @early_freeze_test2(i32* %ptr) {
+define i1 @early_freeze_test2(ptr %ptr) {
; CHECK-LABEL: @early_freeze_test2(
-; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[PTR:%.*]], align 4
+; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[PTR:%.*]], align 4
; CHECK-NEXT: [[V1_FR:%.*]] = freeze i32 [[V1]]
; CHECK-NEXT: [[V2:%.*]] = and i32 [[V1_FR]], 1
; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[V2]], 0
; CHECK-NEXT: ret i1 [[COND]]
;
- %v1 = load i32, i32* %ptr
+ %v1 = load i32, ptr %ptr
%v2 = and i32 %v1, 1
%cond = icmp eq i32 %v2, 0
%cond.fr = freeze i1 %cond
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[V_FR:%.*]] = freeze i32 [[V:%.*]]
-; CHECK-NEXT: call void @use_p32(i32* nonnull [[A]])
+; CHECK-NEXT: call void @use_p32(ptr nonnull [[A]])
; CHECK-NEXT: call void @use_i32(i32 [[V_FR]])
; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[V_FR]], 0
; CHECK-NEXT: br i1 [[COND]], label [[BB0:%.*]], label [[BB1:%.*]]
;
entry:
%a = alloca i32
- call void @use_p32(i32* %a)
+ call void @use_p32(ptr %a)
call void @use_i32(i32 %v)
%cond = icmp eq i32 %v, 0
br i1 %cond, label %bb0, label %bb1
declare i32 @__CxxFrameHandler3(...)
-define void @freeze_dominated_uses_catchswitch(i1 %c, i32 %x) personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+define void @freeze_dominated_uses_catchswitch(i1 %c, i32 %x) personality ptr @__CxxFrameHandler3 {
; CHECK-LABEL: @freeze_dominated_uses_catchswitch(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ 0, [[IF_THEN]] ], [ [[X:%.*]], [[IF_ELSE]] ]
; CHECK-NEXT: [[CS:%.*]] = catchswitch within none [label [[CATCH:%.*]], label %catch2] unwind to caller
; CHECK: catch:
-; CHECK-NEXT: [[CP:%.*]] = catchpad within [[CS]] [i8* null, i32 64, i8* null]
+; CHECK-NEXT: [[CP:%.*]] = catchpad within [[CS]] [ptr null, i32 64, ptr null]
; CHECK-NEXT: [[PHI_FREEZE:%.*]] = freeze i32 [[PHI]]
; CHECK-NEXT: call void @use_i32(i32 [[PHI_FREEZE]]) [ "funclet"(token [[CP]]) ]
; CHECK-NEXT: unreachable
; CHECK: catch2:
-; CHECK-NEXT: [[CP2:%.*]] = catchpad within [[CS]] [i8* null, i32 64, i8* null]
+; CHECK-NEXT: [[CP2:%.*]] = catchpad within [[CS]] [ptr null, i32 64, ptr null]
; CHECK-NEXT: call void @use_i32(i32 [[PHI]]) [ "funclet"(token [[CP2]]) ]
; CHECK-NEXT: unreachable
; CHECK: cleanup:
%cs = catchswitch within none [label %catch, label %catch2] unwind to caller
catch:
- %cp = catchpad within %cs [i8* null, i32 64, i8* null]
+ %cp = catchpad within %cs [ptr null, i32 64, ptr null]
%phi.freeze = freeze i32 %phi
call void @use_i32(i32 %phi.freeze) [ "funclet"(token %cp) ]
unreachable
catch2:
- %cp2 = catchpad within %cs [i8* null, i32 64, i8* null]
+ %cp2 = catchpad within %cs [ptr null, i32 64, ptr null]
call void @use_i32(i32 %phi) [ "funclet"(token %cp2) ]
unreachable
ret i32 %phi
}
-define i32 @freeze_invoke_use_in_phi(i1 %c) personality i8* undef {
+define i32 @freeze_invoke_use_in_phi(i1 %c) personality ptr undef {
; CHECK-LABEL: @freeze_invoke_use_in_phi(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[X:%.*]] = invoke i32 @get_i32()
unreachable
}
-define i32 @freeze_invoke_use_after_phi(i1 %c) personality i8* undef {
+define i32 @freeze_invoke_use_after_phi(i1 %c) personality ptr undef {
; CHECK-LABEL: @freeze_invoke_use_after_phi(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[X:%.*]] = invoke i32 @get_i32()
ret i32 %v1.fr
}
-define i8* @propagate_drop_gep1(i8* %arg) {
+define ptr @propagate_drop_gep1(ptr %arg) {
; CHECK-LABEL: @propagate_drop_gep1(
-; CHECK-NEXT: [[ARG_FR:%.*]] = freeze i8* [[ARG:%.*]]
-; CHECK-NEXT: [[V1:%.*]] = getelementptr i8, i8* [[ARG_FR]], i64 16
-; CHECK-NEXT: ret i8* [[V1]]
+; CHECK-NEXT: [[ARG_FR:%.*]] = freeze ptr [[ARG:%.*]]
+; CHECK-NEXT: [[V1:%.*]] = getelementptr i8, ptr [[ARG_FR]], i64 16
+; CHECK-NEXT: ret ptr [[V1]]
;
- %v1 = getelementptr inbounds i8, i8* %arg, i64 16
- %v1.fr = freeze i8* %v1
- ret i8* %v1.fr
+ %v1 = getelementptr inbounds i8, ptr %arg, i64 16
+ %v1.fr = freeze ptr %v1
+ ret ptr %v1.fr
}
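; Pushing the freeze above the GEP requires dropping poison-generating flags,
; which is why "inbounds" is gone from the CHECK lines above:
;   freeze(getelementptr inbounds i8, ptr %arg, i64 16)
;   ==> getelementptr i8, ptr freeze(%arg), i64 16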
define float @propagate_drop_fneg(float %arg) {
ret void
}
-define void @fold_phi_gep(i8* %init, i8* %end) {
+define void @fold_phi_gep(ptr %init, ptr %end) {
; CHECK-LABEL: @fold_phi_gep(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[PHI_FR:%.*]] = freeze i8* [[INIT:%.*]]
+; CHECK-NEXT: [[PHI_FR:%.*]] = freeze ptr [[INIT:%.*]]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i8* [ [[PHI_FR]], [[ENTRY:%.*]] ], [ [[I_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[I_NEXT]] = getelementptr i8, i8* [[I]], i64 1
-; CHECK-NEXT: [[COND:%.*]] = icmp eq i8* [[I_NEXT]], [[END:%.*]]
+; CHECK-NEXT: [[I:%.*]] = phi ptr [ [[PHI_FR]], [[ENTRY:%.*]] ], [ [[I_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I_NEXT]] = getelementptr i8, ptr [[I]], i64 1
+; CHECK-NEXT: [[COND:%.*]] = icmp eq ptr [[I_NEXT]], [[END:%.*]]
; CHECK-NEXT: br i1 [[COND]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
br label %loop
loop: ; preds = %loop, %entry
- %i = phi i8* [ %init, %entry ], [ %i.next, %loop ]
- %i.fr = freeze i8* %i
- %i.next = getelementptr i8, i8* %i.fr, i64 1
- %cond = icmp eq i8* %i.next, %end
+ %i = phi ptr [ %init, %entry ], [ %i.next, %loop ]
+ %i.fr = freeze ptr %i
+ %i.next = getelementptr i8, ptr %i.fr, i64 1
+ %cond = icmp eq ptr %i.next, %end
br i1 %cond, label %loop, label %exit
exit: ; preds = %loop
ret void
}
-define void @fold_phi_invoke_start_value(i32 %n) personality i8* undef {
+define void @fold_phi_invoke_start_value(i32 %n) personality ptr undef {
; CHECK-LABEL: @fold_phi_invoke_start_value(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[INIT:%.*]] = invoke i32 @get_i32()
ret void
}
-define void @fold_phi_invoke_noundef_start_value(i32 %n) personality i8* undef {
+define void @fold_phi_invoke_noundef_start_value(i32 %n) personality ptr undef {
; CHECK-LABEL: @fold_phi_invoke_noundef_start_value(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[INIT:%.*]] = invoke noundef i32 @get_i32()
ret void
}
-define i8* @freeze_load_noundef(i8** %ptr) {
+define ptr @freeze_load_noundef(ptr %ptr) {
; CHECK-LABEL: @freeze_load_noundef(
-; CHECK-NEXT: [[P:%.*]] = load i8*, i8** [[PTR:%.*]], align 8, !noundef !0
-; CHECK-NEXT: ret i8* [[P]]
+; CHECK-NEXT: [[P:%.*]] = load ptr, ptr [[PTR:%.*]], align 8, !noundef !0
+; CHECK-NEXT: ret ptr [[P]]
;
- %p = load i8*, i8** %ptr, !noundef !0
- %p.fr = freeze i8* %p
- ret i8* %p.fr
+ %p = load ptr, ptr %ptr, !noundef !0
+ %p.fr = freeze ptr %p
+ ret ptr %p.fr
}
-define i8* @freeze_load_dereferenceable(i8** %ptr) {
+define ptr @freeze_load_dereferenceable(ptr %ptr) {
; CHECK-LABEL: @freeze_load_dereferenceable(
-; CHECK-NEXT: [[P:%.*]] = load i8*, i8** [[PTR:%.*]], align 8, !dereferenceable !1
-; CHECK-NEXT: ret i8* [[P]]
+; CHECK-NEXT: [[P:%.*]] = load ptr, ptr [[PTR:%.*]], align 8, !dereferenceable !1
+; CHECK-NEXT: ret ptr [[P]]
;
- %p = load i8*, i8** %ptr, !dereferenceable !1
- %p.fr = freeze i8* %p
- ret i8* %p.fr
+ %p = load ptr, ptr %ptr, !dereferenceable !1
+ %p.fr = freeze ptr %p
+ ret ptr %p.fr
}
-define i8* @freeze_load_dereferenceable_or_null(i8** %ptr) {
+define ptr @freeze_load_dereferenceable_or_null(ptr %ptr) {
; CHECK-LABEL: @freeze_load_dereferenceable_or_null(
-; CHECK-NEXT: [[P:%.*]] = load i8*, i8** [[PTR:%.*]], align 8, !dereferenceable_or_null !1
-; CHECK-NEXT: ret i8* [[P]]
+; CHECK-NEXT: [[P:%.*]] = load ptr, ptr [[PTR:%.*]], align 8, !dereferenceable_or_null !1
+; CHECK-NEXT: ret ptr [[P]]
;
- %p = load i8*, i8** %ptr, !dereferenceable_or_null !1
- %p.fr = freeze i8* %p
- ret i8* %p.fr
+ %p = load ptr, ptr %ptr, !dereferenceable_or_null !1
+ %p.fr = freeze ptr %p
+ ret ptr %p.fr
}
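; In the three tests above the load's metadata (!noundef, !dereferenceable,
; !dereferenceable_or_null) guarantees the loaded value is neither undef nor
; poison, so the freeze is a no-op and is simply removed.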
!0 = !{}
define i33 @fshr_constant_shift_amount_modulo_bitwidth_constexpr(i33 %x, i33 %y) {
; CHECK-LABEL: @fshr_constant_shift_amount_modulo_bitwidth_constexpr(
-; CHECK-NEXT: [[R:%.*]] = call i33 @llvm.fshr.i33(i33 [[X:%.*]], i33 [[Y:%.*]], i33 ptrtoint (i8* @external_global to i33))
+; CHECK-NEXT: [[R:%.*]] = call i33 @llvm.fshr.i33(i33 [[X:%.*]], i33 [[Y:%.*]], i33 ptrtoint (ptr @external_global to i33))
; CHECK-NEXT: ret i33 [[R]]
;
- %shamt = ptrtoint i8* @external_global to i33
+ %shamt = ptrtoint ptr @external_global to i33
%r = call i33 @llvm.fshr.i33(i33 %x, i33 %y, i33 %shamt)
ret i33 %r
}
define <2 x i31> @fshl_constant_shift_amount_modulo_bitwidth_vec_const_expr(<2 x i31> %x, <2 x i31> %y) {
; CHECK-LABEL: @fshl_constant_shift_amount_modulo_bitwidth_vec_const_expr(
-; CHECK-NEXT: [[R:%.*]] = call <2 x i31> @llvm.fshl.v2i31(<2 x i31> [[X:%.*]], <2 x i31> [[Y:%.*]], <2 x i31> <i31 34, i31 ptrtoint (i8* @external_global to i31)>)
+; CHECK-NEXT: [[R:%.*]] = call <2 x i31> @llvm.fshl.v2i31(<2 x i31> [[X:%.*]], <2 x i31> [[Y:%.*]], <2 x i31> <i31 34, i31 ptrtoint (ptr @external_global to i31)>)
; CHECK-NEXT: ret <2 x i31> [[R]]
;
- %shamt = ptrtoint i8* @external_global to i31
- %r = call <2 x i31> @llvm.fshl.v2i31(<2 x i31> %x, <2 x i31> %y, <2 x i31> <i31 34, i31 ptrtoint (i8* @external_global to i31)>)
+ %shamt = ptrtoint ptr @external_global to i31
+ %r = call <2 x i31> @llvm.fshl.v2i31(<2 x i31> %x, <2 x i31> %y, <2 x i31> <i31 34, i31 ptrtoint (ptr @external_global to i31)>)
ret <2 x i31> %r
}
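; The funnel-shift amount is interpreted modulo the bit width (33 and 31
; here), but a ptrtoint constant expression has no compile-time value, so the
; modulo cannot be folded and the calls keep the constant-expression amounts.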
define float @PR37605(float %conv) {
; CHECK-LABEL: @PR37605(
-; CHECK-NEXT: [[SUB:%.*]] = fsub float [[CONV:%.*]], bitcast (i32 ptrtoint (i16* @b to i32) to float)
+; CHECK-NEXT: [[SUB:%.*]] = fsub float [[CONV:%.*]], bitcast (i32 ptrtoint (ptr @b to i32) to float)
; CHECK-NEXT: ret float [[SUB]]
;
- %sub = fsub float %conv, bitcast (i32 ptrtoint (i16* @b to i32) to float)
+ %sub = fsub float %conv, bitcast (i32 ptrtoint (ptr @b to i32) to float)
ret float %sub
}
@str = constant [1 x i8] zeroinitializer
@empty = constant [0 x i8] zeroinitializer
-declare i64 @fwrite(i8*, i64, i64, %FILE *)
+declare i64 @fwrite(ptr, i64, i64, ptr)
; Check fwrite(S, 1, 1, fp) -> fputc(S[0], fp).
-define void @test_simplify1(%FILE* %fp) {
+define void @test_simplify1(ptr %fp) {
; CHECK-LABEL: @test_simplify1(
- %str = getelementptr inbounds [1 x i8], [1 x i8]* @str, i64 0, i64 0
- call i64 @fwrite(i8* %str, i64 1, i64 1, %FILE* %fp)
-; CHECK-NEXT: call i32 @fputc(i32 0, %FILE* %fp)
+ call i64 @fwrite(ptr @str, i64 1, i64 1, ptr %fp)
+; CHECK-NEXT: call i32 @fputc(i32 0, ptr %fp)
ret void
; CHECK-NEXT: ret void
}
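; Since @str is [1 x i8] zeroinitializer, S[0] is 0, which is why the
; simplified call above is fputc(i32 0, %fp).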
-define void @test_simplify2(%FILE* %fp) {
+define void @test_simplify2(ptr %fp) {
; CHECK-LABEL: @test_simplify2(
- %str = getelementptr inbounds [0 x i8], [0 x i8]* @empty, i64 0, i64 0
- call i64 @fwrite(i8* %str, i64 1, i64 0, %FILE* %fp)
+ call i64 @fwrite(ptr @empty, i64 1, i64 0, ptr %fp)
ret void
; CHECK-NEXT: ret void
}
-define void @test_simplify3(%FILE* %fp) {
+define void @test_simplify3(ptr %fp) {
; CHECK-LABEL: @test_simplify3(
- %str = getelementptr inbounds [0 x i8], [0 x i8]* @empty, i64 0, i64 0
- call i64 @fwrite(i8* %str, i64 0, i64 1, %FILE* %fp)
+ call i64 @fwrite(ptr @empty, i64 0, i64 1, ptr %fp)
ret void
; CHECK-NEXT: ret void
}
-define i64 @test_no_simplify1(%FILE* %fp) {
+define i64 @test_no_simplify1(ptr %fp) {
; CHECK-LABEL: @test_no_simplify1(
- %str = getelementptr inbounds [1 x i8], [1 x i8]* @str, i64 0, i64 0
- %ret = call i64 @fwrite(i8* %str, i64 1, i64 1, %FILE* %fp)
+ %ret = call i64 @fwrite(ptr @str, i64 1, i64 1, ptr %fp)
; CHECK-NEXT: call i64 @fwrite
ret i64 %ret
; CHECK-NEXT: ret i64 %ret
}
-define void @test_no_simplify2(%FILE* %fp, i64 %size) {
+define void @test_no_simplify2(ptr %fp, i64 %size) {
; CHECK-LABEL: @test_no_simplify2(
- %str = getelementptr inbounds [1 x i8], [1 x i8]* @str, i64 0, i64 0
- call i64 @fwrite(i8* %str, i64 %size, i64 1, %FILE* %fp)
+ call i64 @fwrite(ptr @str, i64 %size, i64 1, ptr %fp)
; CHECK-NEXT: call i64 @fwrite
ret void
; CHECK-NEXT: ret void
unreach:
; CHECK: token undef
- %token_call = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token undef, i32 0, i32 0)
+ %token_call = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token undef, i32 0, i32 0)
ret i32 1
}
-declare i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token, i32, i32)
+declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32, i32)
; If the relocated pointer is known to be dereferenceable(N),
; then the return attribute of gc.relocate is dereferenceable(N).
declare zeroext i1 @return_i1()
-declare token @llvm.experimental.gc.statepoint.p0f_i1f(i64, i32, i1 ()*, i32, i32, ...)
-declare i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token, i32, i32)
+declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...)
+declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32, i32)
-define i32 @explicit_nonnull(i32 addrspace(1)* nonnull %dparam) gc "statepoint-example" {
+define i32 @explicit_nonnull(ptr addrspace(1) nonnull %dparam) gc "statepoint-example" {
; Checks that a pointer explicitly marked nonnull stays nonnull through the
; relocate, so the null check folds away.
; CHECK-LABEL: @explicit_nonnull
; CHECK: ret i32 1
entry:
- %load = load i32, i32 addrspace(1)* %dparam
- %tok = tail call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0) ["gc-live"(i32 addrspace(1)* %dparam)]
- %relocate = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %tok, i32 0, i32 0)
- %cmp = icmp eq i32 addrspace(1)* %relocate, null
+ %load = load i32, ptr addrspace(1) %dparam
+ %tok = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) %dparam)]
+ %relocate = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tok, i32 0, i32 0)
+ %cmp = icmp eq ptr addrspace(1) %relocate, null
%ret_val = select i1 %cmp, i32 0, i32 1
ret i32 %ret_val
}
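; Sketch of the reasoning: %relocate inherits nonnull from the explicitly
; nonnull %dparam, so the icmp folds to false and the select to i32 1.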
-define i32 @implicit_nonnull(i32 addrspace(1)* %dparam) gc "statepoint-example" {
+define i32 @implicit_nonnull(ptr addrspace(1) %dparam) gc "statepoint-example" {
; Checks that a pointer proven nonnull by a dominating null check stays
; nonnull through the relocate.
; CHECK-LABEL: @implicit_nonnull
; CHECK: ret i32 1
entry:
- %cond = icmp eq i32 addrspace(1)* %dparam, null
+ %cond = icmp eq ptr addrspace(1) %dparam, null
br i1 %cond, label %no_gc, label %gc
gc:
- %load = load i32, i32 addrspace(1)* %dparam
- %tok = tail call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0) ["gc-live"(i32 addrspace(1)* %dparam)]
- %relocate = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %tok, i32 0, i32 0)
- %cmp = icmp eq i32 addrspace(1)* %relocate, null
+ %load = load i32, ptr addrspace(1) %dparam
+ %tok = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) %dparam)]
+ %relocate = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tok, i32 0, i32 0)
+ %cmp = icmp eq ptr addrspace(1) %relocate, null
%ret_val = select i1 %cmp, i32 0, i32 1
ret i32 %ret_val
no_gc:
; Make sure we don't crash when processing vectors
-define <2 x i8 addrspace(1)*> @vector(<2 x i8 addrspace(1)*> %obj) gc "statepoint-example" {
+define <2 x ptr addrspace(1)> @vector(<2 x ptr addrspace(1)> %obj) gc "statepoint-example" {
entry:
; CHECK-LABEL: @vector
; CHECK: gc.statepoint
; CHECK: gc.relocate
- %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @do_safepoint, i32 0, i32 0, i32 0, i32 0) ["gc-live"(<2 x i8 addrspace(1)*> %obj)]
- %obj.relocated = call coldcc <2 x i8 addrspace(1)*> @llvm.experimental.gc.relocate.v2p1i8(token %safepoint_token, i32 0, i32 0) ; (%obj, %obj)
- ret <2 x i8 addrspace(1)*> %obj.relocated
+ %safepoint_token = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @do_safepoint, i32 0, i32 0, i32 0, i32 0) ["gc-live"(<2 x ptr addrspace(1)> %obj)]
+ %obj.relocated = call coldcc <2 x ptr addrspace(1)> @llvm.experimental.gc.relocate.v2p1(token %safepoint_token, i32 0, i32 0) ; (%obj, %obj)
+ ret <2 x ptr addrspace(1)> %obj.relocated
}
-define i32 addrspace(1)* @canonical_base(i32 addrspace(1)* %dparam) gc "statepoint-example" {
+define ptr addrspace(1) @canonical_base(ptr addrspace(1) %dparam) gc "statepoint-example" {
; Checks that a gc.relocate whose base and derived pointers are the same value
; is canonicalized so both indices refer to the same gc-live slot.
; CHECK-LABEL: @canonical_base
; CHECK: (token %tok, i32 0, i32 0) ; (%dparam, %dparam)
entry:
- %tok = tail call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0) ["gc-live"(i32 addrspace(1)* %dparam, i32 addrspace(1)* %dparam)]
- %relocate = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %tok, i32 0, i32 1)
- ret i32 addrspace(1)* %relocate
+ %tok = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) %dparam, ptr addrspace(1) %dparam)]
+ %relocate = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tok, i32 0, i32 1)
+ ret ptr addrspace(1) %relocate
}
declare void @do_safepoint()
-declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64, i32, void ()*, i32, i32, ...)
-declare i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token, i32, i32)
-declare <2 x i8 addrspace(1)*> @llvm.experimental.gc.relocate.v2p1i8(token, i32, i32)
+declare <2 x ptr addrspace(1)> @llvm.experimental.gc.relocate.v2p1(token, i32, i32)
target triple = "aarch64-unknown-linux-android10000"
@x.hwasan = private global { [3 x i32], [4 x i8] } { [3 x i32] [i32 42, i32 57, i32 10], [4 x i8] c"\00\00\00\87" }, align 16
-@x = alias [3 x i32], inttoptr (i64 add (i64 ptrtoint ({ [3 x i32], [4 x i8] }* @x.hwasan to i64), i64 -8718968878589280256) to [3 x i32]*)
+@x = alias [3 x i32], inttoptr (i64 add (i64 ptrtoint (ptr @x.hwasan to i64), i64 -8718968878589280256) to ptr)
define i32 @f(i64 %i) {
entry:
- ; CHECK: getelementptr inbounds [3 x i32], [3 x i32]* @x
- %arrayidx = getelementptr inbounds [3 x i32], [3 x i32]* @x, i64 0, i64 %i
- %0 = load i32, i32* %arrayidx
+ ; CHECK: getelementptr inbounds [3 x i32], ptr @x
+ %arrayidx = getelementptr inbounds [3 x i32], ptr @x, i64 0, i64 %i
+ %0 = load i32, ptr %arrayidx
ret i32 %0
}
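; The alias @x adds 0x87 << 56 to @x.hwasan's address, installing the HWASan
; tag stored in the global's trailing "\00\00\00\87" padding; the GEP must
; stay based on @x so that the tagged address is not lost.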
declare void @do_something(<vscale x 4 x i32> %x)
-define void @can_replace_gep_idx_with_zero_typesize(i64 %n, <vscale x 4 x i32>* %a, i64 %b) {
- %idx = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %a, i64 %b
- %tmp = load <vscale x 4 x i32>, <vscale x 4 x i32>* %idx
+define void @can_replace_gep_idx_with_zero_typesize(i64 %n, ptr %a, i64 %b) {
+ %idx = getelementptr <vscale x 4 x i32>, ptr %a, i64 %b
+ %tmp = load <vscale x 4 x i32>, ptr %idx
call void @do_something(<vscale x 4 x i32> %tmp)
ret void
}
; The constant-indexed GEP instruction should be swapped to the end, even
; without merging.
; result = (((i32*) p + a) + b) + 1
define ptr @basic(ptr %p, i64 %a, i64 %b) {
; CHECK-LABEL: @basic(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 1
}
; Constant-indexed GEPs are merged after swapping.
; result = ((i32*) p + a) + 3
define ptr @merge(ptr %p, i64 %a) {
; CHECK-LABEL: @merge(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 1
; Multiple constant-indexed GEP. Note that the first two cannot be merged at
; first, but after the second and third are merged, the result can be merged
; with the first one on the next pass.
; result = (<3 x i32>*) ((i16*) ((i8*) ptr + a) + (a * b)) + 9
define ptr @nested(ptr %p, i64 %a, i64 %b) {
; CHECK-LABEL: @nested(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds <3 x i32>, ptr [[P:%.*]], i64 1
; CHECK-NEXT: ret i1 false
;
entry:
- %gep = getelementptr inbounds i8, i8* null, i64 0
- %cnd = icmp ne i8* %gep, null
+ %cnd = icmp ne ptr null, null
ret i1 %cnd
}
; CHECK-NEXT: ret i1 true
;
entry:
- %gep = getelementptr inbounds i8, i8* null, i64 1
- %cnd = icmp ne i8* %gep, null
+ %gep = getelementptr inbounds i8, ptr null, i64 1
+ %cnd = icmp ne ptr %gep, null
ret i1 %cnd
}
; CHECK-NEXT: ret i1 true
;
entry:
- %gep = getelementptr inbounds i8, i8* null, i64 0
- %cnd = icmp eq i8* %gep, null
+ %cnd = icmp eq ptr null, null
ret i1 %cnd
}
; CHECK-NEXT: ret i1 false
;
entry:
- %gep = getelementptr inbounds i8, i8* null, i64 1
- %cnd = icmp eq i8* %gep, null
+ %gep = getelementptr inbounds i8, ptr null, i64 1
+ %cnd = icmp eq ptr %gep, null
ret i1 %cnd
}
;; Then show the results for non-constant operands. These rely on the UB
;; implied by inbounds to ignore the possible overflow cases.
-define i1 @test_ne(i8* %base, i64 %idx) {
+define i1 @test_ne(ptr %base, i64 %idx) {
; CHECK-LABEL: @test_ne(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CND:%.*]] = icmp ne i8* [[BASE:%.*]], null
+; CHECK-NEXT: [[CND:%.*]] = icmp ne ptr [[BASE:%.*]], null
; CHECK-NEXT: ret i1 [[CND]]
;
entry:
- %gep = getelementptr inbounds i8, i8* %base, i64 %idx
- %cnd = icmp ne i8* %gep, null
+ %gep = getelementptr inbounds i8, ptr %base, i64 %idx
+ %cnd = icmp ne ptr %gep, null
ret i1 %cnd
}
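; Sketch of the inbounds reasoning used here: in the default address space an
; inbounds GEP with a null base and a non-zero offset is poison, so
;   icmp ne (getelementptr inbounds i8, ptr %base, i64 %idx), null
; can be decided from %base alone, for any %idx.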
-define i1 @test_eq(i8* %base, i64 %idx) {
+define i1 @test_eq(ptr %base, i64 %idx) {
; CHECK-LABEL: @test_eq(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CND:%.*]] = icmp eq i8* [[BASE:%.*]], null
+; CHECK-NEXT: [[CND:%.*]] = icmp eq ptr [[BASE:%.*]], null
; CHECK-NEXT: ret i1 [[CND]]
;
entry:
- %gep = getelementptr inbounds i8, i8* %base, i64 %idx
- %cnd = icmp eq i8* %gep, null
+ %gep = getelementptr inbounds i8, ptr %base, i64 %idx
+ %cnd = icmp eq ptr %gep, null
ret i1 %cnd
}
-define <2 x i1> @test_vector_base(<2 x i8*> %base, i64 %idx) {
+define <2 x i1> @test_vector_base(<2 x ptr> %base, i64 %idx) {
; CHECK-LABEL: @test_vector_base(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CND:%.*]] = icmp eq <2 x i8*> [[BASE:%.*]], zeroinitializer
+; CHECK-NEXT: [[CND:%.*]] = icmp eq <2 x ptr> [[BASE:%.*]], zeroinitializer
; CHECK-NEXT: ret <2 x i1> [[CND]]
;
entry:
- %gep = getelementptr inbounds i8, <2 x i8*> %base, i64 %idx
- %cnd = icmp eq <2 x i8*> %gep, zeroinitializer
+ %gep = getelementptr inbounds i8, <2 x ptr> %base, i64 %idx
+ %cnd = icmp eq <2 x ptr> %gep, zeroinitializer
ret <2 x i1> %cnd
}
-define <2 x i1> @test_vector_index(i8* %base, <2 x i64> %idx) {
+define <2 x i1> @test_vector_index(ptr %base, <2 x i64> %idx) {
; CHECK-LABEL: @test_vector_index(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i8*> poison, i8* [[BASE:%.*]], i64 0
-; CHECK-NEXT: [[TMP0:%.*]] = icmp eq <2 x i8*> [[DOTSPLATINSERT]], zeroinitializer
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x ptr> poison, ptr [[BASE:%.*]], i64 0
+; CHECK-NEXT: [[TMP0:%.*]] = icmp eq <2 x ptr> [[DOTSPLATINSERT]], zeroinitializer
; CHECK-NEXT: [[CND:%.*]] = shufflevector <2 x i1> [[TMP0]], <2 x i1> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: ret <2 x i1> [[CND]]
;
entry:
- %gep = getelementptr inbounds i8, i8* %base, <2 x i64> %idx
- %cnd = icmp eq <2 x i8*> %gep, zeroinitializer
+ %gep = getelementptr inbounds i8, ptr %base, <2 x i64> %idx
+ %cnd = icmp eq <2 x ptr> %gep, zeroinitializer
ret <2 x i1> %cnd
}
-define <2 x i1> @test_vector_both(<2 x i8*> %base, <2 x i64> %idx) {
+define <2 x i1> @test_vector_both(<2 x ptr> %base, <2 x i64> %idx) {
; CHECK-LABEL: @test_vector_both(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CND:%.*]] = icmp eq <2 x i8*> [[BASE:%.*]], zeroinitializer
+; CHECK-NEXT: [[CND:%.*]] = icmp eq <2 x ptr> [[BASE:%.*]], zeroinitializer
; CHECK-NEXT: ret <2 x i1> [[CND]]
;
entry:
- %gep = getelementptr inbounds i8, <2 x i8*> %base, <2 x i64> %idx
- %cnd = icmp eq <2 x i8*> %gep, zeroinitializer
+ %gep = getelementptr inbounds i8, <2 x ptr> %base, <2 x i64> %idx
+ %cnd = icmp eq <2 x ptr> %gep, zeroinitializer
ret <2 x i1> %cnd
}
;; These two show instsimplify's reasoning getting to the non-zero offsets
;; before instcombine does.
-define i1 @test_eq_pos_idx(i8* %base) {
+define i1 @test_eq_pos_idx(ptr %base) {
; CHECK-LABEL: @test_eq_pos_idx(
; CHECK-NEXT: entry:
; CHECK-NEXT: ret i1 false
;
entry:
- %gep = getelementptr inbounds i8, i8* %base, i64 1
- %cnd = icmp eq i8* %gep, null
+ %gep = getelementptr inbounds i8, ptr %base, i64 1
+ %cnd = icmp eq ptr %gep, null
ret i1 %cnd
}
-define i1 @test_eq_neg_idx(i8* %base) {
+define i1 @test_eq_neg_idx(ptr %base) {
; CHECK-LABEL: @test_eq_neg_idx(
; CHECK-NEXT: entry:
; CHECK-NEXT: ret i1 false
;
entry:
- %gep = getelementptr inbounds i8, i8* %base, i64 -1
- %cnd = icmp eq i8* %gep, null
+ %gep = getelementptr inbounds i8, ptr %base, i64 -1
+ %cnd = icmp eq ptr %gep, null
ret i1 %cnd
}
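; With a constant non-zero index the result itself is known: if %base were
; null, the inbounds GEP at offset 1 or -1 would be poison, so both compares
; against null fold directly to false.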
;; A corner case which keeps getting mentioned: the GEP
;; produces %base regardless of the value of the index
;; expression.
-define i1 @test_size0({}* %base, i64 %idx) {
+define i1 @test_size0(ptr %base, i64 %idx) {
; CHECK-LABEL: @test_size0(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CND:%.*]] = icmp ne {}* [[BASE:%.*]], null
+; CHECK-NEXT: [[CND:%.*]] = icmp ne ptr [[BASE:%.*]], null
; CHECK-NEXT: ret i1 [[CND]]
;
entry:
- %gep = getelementptr inbounds {}, {}* %base, i64 %idx
- %cnd = icmp ne {}* %gep, null
+ %gep = getelementptr inbounds {}, ptr %base, i64 %idx
+ %cnd = icmp ne ptr %gep, null
ret i1 %cnd
}
-define i1 @test_size0_nonzero_offset({}* %base) {
+define i1 @test_size0_nonzero_offset(ptr %base) {
; CHECK-LABEL: @test_size0_nonzero_offset(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CND:%.*]] = icmp ne {}* [[BASE:%.*]], null
+; CHECK-NEXT: [[CND:%.*]] = icmp ne ptr [[BASE:%.*]], null
; CHECK-NEXT: ret i1 [[CND]]
;
entry:
- %gep = getelementptr inbounds {}, {}* %base, i64 15
- %cnd = icmp ne {}* %gep, null
+ %gep = getelementptr inbounds {}, ptr %base, i64 15
+ %cnd = icmp ne ptr %gep, null
ret i1 %cnd
}
-define i1 @test_index_type([10 x i8]* %base, i64 %idx) {
+define i1 @test_index_type(ptr %base, i64 %idx) {
; CHECK-LABEL: @test_index_type(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CND:%.*]] = icmp eq [10 x i8]* [[BASE:%.*]], null
+; CHECK-NEXT: [[CND:%.*]] = icmp eq ptr [[BASE:%.*]], null
; CHECK-NEXT: ret i1 [[CND]]
;
entry:
- %gep = getelementptr inbounds [10 x i8], [10 x i8]* %base, i64 %idx, i64 %idx
- %cnd = icmp eq i8* %gep, null
+ %gep = getelementptr inbounds [10 x i8], ptr %base, i64 %idx, i64 %idx
+ %cnd = icmp eq ptr %gep, null
ret i1 %cnd
}
;; Finally, some negative tests for basic correctness checking.
-define i1 @neq_noinbounds(i8* %base, i64 %idx) {
+define i1 @neq_noinbounds(ptr %base, i64 %idx) {
; CHECK-LABEL: @neq_noinbounds(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, i8* [[BASE:%.*]], i64 [[IDX:%.*]]
-; CHECK-NEXT: [[CND:%.*]] = icmp ne i8* [[GEP]], null
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[CND:%.*]] = icmp ne ptr [[GEP]], null
; CHECK-NEXT: ret i1 [[CND]]
;
entry:
- %gep = getelementptr i8, i8* %base, i64 %idx
- %cnd = icmp ne i8* %gep, null
+ %gep = getelementptr i8, ptr %base, i64 %idx
+ %cnd = icmp ne ptr %gep, null
ret i1 %cnd
}
-define i1 @neg_objectatnull(i8 addrspace(2)* %base, i64 %idx) {
+define i1 @neg_objectatnull(ptr addrspace(2) %base, i64 %idx) {
; CHECK-LABEL: @neg_objectatnull(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, i8 addrspace(2)* [[BASE:%.*]], i64 [[IDX:%.*]]
-; CHECK-NEXT: [[CND:%.*]] = icmp eq i8 addrspace(2)* [[GEP]], null
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr addrspace(2) [[BASE:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[CND:%.*]] = icmp eq ptr addrspace(2) [[GEP]], null
; CHECK-NEXT: ret i1 [[CND]]
;
entry:
- %gep = getelementptr inbounds i8, i8 addrspace(2)* %base, i64 %idx
- %cnd = icmp eq i8 addrspace(2)* %gep, null
+ %gep = getelementptr inbounds i8, ptr addrspace(2) %base, i64 %idx
+ %cnd = icmp eq ptr addrspace(2) %gep, null
ret i1 %cnd
}
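; Negative test: in an address space where a valid object may live at address
; null (as the name "objectatnull" suggests), a null GEP result does not imply
; poison, so neither the GEP nor the compare may be folded.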
; bitcast between different address spaces. The addrspacecast is
; stripped off and the addrspace(0) null can be treated as invalid.
; FIXME: This should be able to fold to ret i1 false
-define i1 @invalid_bitcast_icmp_addrspacecast_as0_null(i32 addrspace(5)* %ptr) {
+define i1 @invalid_bitcast_icmp_addrspacecast_as0_null(ptr addrspace(5) %ptr) {
; CHECK-LABEL: @invalid_bitcast_icmp_addrspacecast_as0_null(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 addrspace(5)* [[PTR:%.*]], addrspacecast (i32* null to i32 addrspace(5)*)
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq ptr addrspace(5) [[PTR:%.*]], addrspacecast (ptr null to ptr addrspace(5))
; CHECK-NEXT: ret i1 [[TMP2]]
;
bb:
- %tmp1 = getelementptr inbounds i32, i32 addrspace(5)* %ptr, i32 1
- %tmp2 = icmp eq i32 addrspace(5)* %tmp1, addrspacecast (i32* null to i32 addrspace(5)*)
+ %tmp1 = getelementptr inbounds i32, ptr addrspace(5) %ptr, i32 1
+ %tmp2 = icmp eq ptr addrspace(5) %tmp1, addrspacecast (ptr null to ptr addrspace(5))
ret i1 %tmp2
}
-define i1 @invalid_bitcast_icmp_addrspacecast_as0_null_var(i32 addrspace(5)* %ptr, i32 %idx) {
+define i1 @invalid_bitcast_icmp_addrspacecast_as0_null_var(ptr addrspace(5) %ptr, i32 %idx) {
; CHECK-LABEL: @invalid_bitcast_icmp_addrspacecast_as0_null_var(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 addrspace(5)* [[PTR:%.*]], addrspacecast (i32* null to i32 addrspace(5)*)
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq ptr addrspace(5) [[PTR:%.*]], addrspacecast (ptr null to ptr addrspace(5))
; CHECK-NEXT: ret i1 [[TMP2]]
;
bb:
- %tmp1 = getelementptr inbounds i32, i32 addrspace(5)* %ptr, i32 %idx
- %tmp2 = icmp eq i32 addrspace(5)* %tmp1, addrspacecast (i32* null to i32 addrspace(5)*)
+ %tmp1 = getelementptr inbounds i32, ptr addrspace(5) %ptr, i32 %idx
+ %tmp2 = icmp eq ptr addrspace(5) %tmp1, addrspacecast (ptr null to ptr addrspace(5))
ret i1 %tmp2
}
%struct.B = type { i8, [3 x i16], %struct.A, float }
%struct.C = type { i8, i32, i32 }
; result = (i32*) p + 3
define ptr @mergeBasic(ptr %p) {
; CHECK-LABEL: @mergeBasic(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 3
ret ptr %2
}
-; Converted to i8* and merged.
-; result = (i8*) p + 10
+; Converted to an i8 (byte) GEP and merged.
+; result = (ptr) p + 10
define ptr @mergeDifferentTypes(ptr %p) {
; CHECK-LABEL: @mergeDifferentTypes(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 10
ret ptr %2
}
-; Converted to i8* and merged.
-; result = (i8*) p + 10
+; Converted to an i8 (byte) GEP and merged.
+; result = (ptr) p + 10
define ptr @mergeReverse(ptr %p) {
; CHECK-LABEL: @mergeReverse(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 10
ret ptr %2
}
; result = (i8*) (([20 x i8]*) p + 1) + 17
define ptr @array1(ptr %p) {
; CHECK-LABEL: @array1(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [20 x i8], ptr [[P:%.*]], i64 1, i64 17
ret ptr %2
}
-; Converted to i8* and merged.
-; result = (i8*) p + 20
+; Converted to an i8 (byte) GEP and merged.
+; result = (ptr) p + 20
define ptr @array2(ptr %p) {
; CHECK-LABEL: @array2(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 20
ret ptr %2
}
-; Converted to i8* and merged.
-; result = (i8*) p + 36
+; Converted to an i8 (byte) GEP and merged.
+; result = (ptr) p + 36
define ptr @struct1(ptr %p) {
; CHECK-LABEL: @struct1(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 36
ret ptr %2
}
-; result = (i8*) &((struct.B) p)[0].member2.member0 + 7
+; result = (ptr) &((struct.B*) p)[0].member2.member0 + 7
define ptr @structStruct(ptr %p) {
; CHECK-LABEL: @structStruct(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_B:%.*]], ptr [[P:%.*]], i64 0, i32 2, i32 0, i64 7
; First GEP offset is not divisible by last GEP's source element size, but first
; GEP points to an array such that the last GEP offset is divisible by the
; array's element size, so the first GEP can be rewritten with an extra index.
; result = (i16*) &((struct.B*) p)[i].member1 + 2
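; A sketch of the arithmetic, assuming member1 is the [3 x i16] field of
; %struct.B at byte offset 2: the trailing byte offset of 4 is not divisible
; by sizeof(%struct.B), but 4 / sizeof(i16) = 2, which is the extra "i64 2"
; index appended in the CHECK line below.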
define ptr @appendIndex(ptr %p, i64 %i) {
; CHECK-LABEL: @appendIndex(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_B:%.*]], ptr [[P:%.*]], i64 [[I:%.*]], i32 1, i64 2
ret ptr %2
}
-; Offset of either GEP is not divisible by the other's size, converted to i8*
+; Neither GEP's offset is divisible by the other's element size; converted to an i8 GEP
; and merged.
; Here i24 is 8-bit aligned.
-; result = (i8*) p + 7
+; result = (ptr) p + 7
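; A sketch of the arithmetic, assuming the two original GEPs step by a 3-byte
; i24 and a 4-byte i32: 3 + 4 = 7, and 7 is divisible by neither 3 nor 4, so
; the merged GEP below falls back to byte (i8) addressing.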
define ptr @notDivisible(ptr %p) {
; CHECK-LABEL: @notDivisible(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 7
declare void @use(i32) readonly
; We prefer to canonicalize GEP indices to the machine width early
-define void @test(i32* %p, i32 %index) {
+define void @test(ptr %p, i32 %index) {
; CHECK-LABEL: @test
; CHECK-NEXT: %1 = sext i32 %index to i64
-; CHECK-NEXT: %addr = getelementptr i32, i32* %p, i64 %1
- %addr = getelementptr i32, i32* %p, i32 %index
- %val = load i32, i32* %addr
+; CHECK-NEXT: %addr = getelementptr i32, ptr %p, i64 %1
+ %addr = getelementptr i32, ptr %p, i32 %index
+ %val = load i32, ptr %addr
call void @use(i32 %val)
ret void
}
; If they've already been canonicalized via zext, that's fine
-define void @test2(i32* %p, i32 %index) {
+define void @test2(ptr %p, i32 %index) {
; CHECK-LABEL: @test2
; CHECK-NEXT: %i = zext i32 %index to i64
-; CHECK-NEXT: %addr = getelementptr i32, i32* %p, i64 %i
+; CHECK-NEXT: %addr = getelementptr i32, ptr %p, i64 %i
%i = zext i32 %index to i64
- %addr = getelementptr i32, i32* %p, i64 %i
- %val = load i32, i32* %addr
+ %addr = getelementptr i32, ptr %p, i64 %i
+ %val = load i32, ptr %addr
call void @use(i32 %val)
ret void
}
; If we can use a zext, we prefer that. This requires
; knowing that the index is positive.
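; (!0 is defined at the end of the file; for this to fire it presumably bounds
; the loaded value to a non-negative range, which is what lets the i32 index
; widen with zext rather than sext.)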
-define void @test3(i32* %p, i32 %index) {
+define void @test3(ptr %p, i32 %index) {
; CHECK-LABEL: @test3
; CHECK: zext
; CHECK-NOT: sext
- %addr_begin = getelementptr i32, i32* %p, i64 40
- %addr_fixed = getelementptr i32, i32* %addr_begin, i64 48
- %val_fixed = load i32, i32* %addr_fixed, !range !0
- %addr = getelementptr i32, i32* %addr_begin, i32 %val_fixed
- %val = load i32, i32* %addr
+ %addr_begin = getelementptr i32, ptr %p, i64 40
+ %addr_fixed = getelementptr i32, ptr %addr_begin, i64 48
+ %val_fixed = load i32, ptr %addr_fixed, !range !0
+ %addr = getelementptr i32, ptr %addr_begin, i32 %val_fixed
+ %val = load i32, ptr %addr
call void @use(i32 %val)
ret void
}
; Replace sext with zext where possible
-define void @test4(i32* %p, i32 %index) {
+define void @test4(ptr %p, i32 %index) {
; CHECK-LABEL: @test4
; CHECK: zext
; CHECK-NOT: sext
- %addr_begin = getelementptr i32, i32* %p, i64 40
- %addr_fixed = getelementptr i32, i32* %addr_begin, i64 48
- %val_fixed = load i32, i32* %addr_fixed, !range !0
+ %addr_begin = getelementptr i32, ptr %p, i64 40
+ %addr_fixed = getelementptr i32, ptr %addr_begin, i64 48
+ %val_fixed = load i32, ptr %addr_fixed, !range !0
%i = sext i32 %val_fixed to i64
- %addr = getelementptr i32, i32* %addr_begin, i64 %i
- %val = load i32, i32* %addr
+ %addr = getelementptr i32, ptr %addr_begin, i64 %i
+ %val = load i32, ptr %addr
call void @use(i32 %val)
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine %s -S | FileCheck %s
-define i32* @vector_splat_indices_v2i64_ext0(i32* %a) {
+define ptr @vector_splat_indices_v2i64_ext0(ptr %a) {
; CHECK-LABEL: @vector_splat_indices_v2i64_ext0(
-; CHECK-NEXT: [[RES:%.*]] = getelementptr i32, i32* [[A:%.*]], i64 4
-; CHECK-NEXT: ret i32* [[RES]]
+; CHECK-NEXT: [[RES:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 4
+; CHECK-NEXT: ret ptr [[RES]]
;
- %gep = getelementptr i32, i32* %a, <2 x i64> <i64 4, i64 4>
- %res = extractelement <2 x i32*> %gep, i32 0
- ret i32* %res
+ %gep = getelementptr i32, ptr %a, <2 x i64> <i64 4, i64 4>
+ %res = extractelement <2 x ptr> %gep, i32 0
+ ret ptr %res
}
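; The fold above scalarizes a vector GEP whose index operand is a splat: lane
; 0 of (gep i32, ptr %a, <4, 4>) is just gep i32, ptr %a, i64 4, so no vector
; address computation is needed when a single lane is extracted.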
-define i32* @vector_splat_indices_nxv2i64_ext0(i32* %a) {
+define ptr @vector_splat_indices_nxv2i64_ext0(ptr %a) {
; CHECK-LABEL: @vector_splat_indices_nxv2i64_ext0(
-; CHECK-NEXT: [[RES:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 4
-; CHECK-NEXT: ret i32* [[RES]]
+; CHECK-NEXT: [[RES:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 4
+; CHECK-NEXT: ret ptr [[RES]]
;
%tmp = insertelement <vscale x 2 x i64> poison, i64 4, i32 0
%splatof4 = shufflevector <vscale x 2 x i64> %tmp, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
- %gep = getelementptr inbounds i32, i32* %a, <vscale x 2 x i64> %splatof4
- %res = extractelement <vscale x 2 x i32*> %gep, i32 0
- ret i32* %res
+ %gep = getelementptr inbounds i32, ptr %a, <vscale x 2 x i64> %splatof4
+ %res = extractelement <vscale x 2 x ptr> %gep, i32 0
+ ret ptr %res
}
-define i32* @vector_indices_v2i64_ext0(i32* %a, <2 x i64> %indices) {
+define ptr @vector_indices_v2i64_ext0(ptr %a, <2 x i64> %indices) {
; CHECK-LABEL: @vector_indices_v2i64_ext0(
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x i64> [[INDICES:%.*]], i64 0
-; CHECK-NEXT: [[RES:%.*]] = getelementptr i32, i32* [[A:%.*]], i64 [[TMP1]]
-; CHECK-NEXT: ret i32* [[RES]]
+; CHECK-NEXT: [[RES:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[RES]]
;
- %gep = getelementptr i32, i32* %a, <2 x i64> %indices
- %res = extractelement <2 x i32*> %gep, i32 0
- ret i32* %res
+ %gep = getelementptr i32, ptr %a, <2 x i64> %indices
+ %res = extractelement <2 x ptr> %gep, i32 0
+ ret ptr %res
}
-define i32* @vector_indices_nxv1i64_ext0(i32* %a, <vscale x 1 x i64> %indices) {
+define ptr @vector_indices_nxv1i64_ext0(ptr %a, <vscale x 1 x i64> %indices) {
; CHECK-LABEL: @vector_indices_nxv1i64_ext0(
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <vscale x 1 x i64> [[INDICES:%.*]], i64 0
-; CHECK-NEXT: [[RES:%.*]] = getelementptr i32, i32* [[A:%.*]], i64 [[TMP1]]
-; CHECK-NEXT: ret i32* [[RES]]
+; CHECK-NEXT: [[RES:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[RES]]
;
- %gep = getelementptr i32, i32* %a, <vscale x 1 x i64> %indices
- %res = extractelement <vscale x 1 x i32*> %gep, i32 0
- ret i32* %res
+ %gep = getelementptr i32, ptr %a, <vscale x 1 x i64> %indices
+ %res = extractelement <vscale x 1 x ptr> %gep, i32 0
+ ret ptr %res
}
-define i32* @vector_splat_ptrs_v2i64_ext0(i32* %a, i64 %index) {
+define ptr @vector_splat_ptrs_v2i64_ext0(ptr %a, i64 %index) {
; CHECK-LABEL: @vector_splat_ptrs_v2i64_ext0(
-; CHECK-NEXT: [[RES:%.*]] = getelementptr i32, i32* [[A:%.*]], i64 [[INDEX:%.*]]
-; CHECK-NEXT: ret i32* [[RES]]
+; CHECK-NEXT: [[RES:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[INDEX:%.*]]
+; CHECK-NEXT: ret ptr [[RES]]
;
- %tmp = insertelement <2 x i32*> poison, i32* %a, i32 0
- %splatofa = shufflevector <2 x i32*> %tmp, <2 x i32*> poison, <2 x i32> zeroinitializer
- %gep = getelementptr i32, <2 x i32*> %splatofa, i64 %index
- %res = extractelement <2 x i32*> %gep, i32 0
- ret i32* %res
+ %tmp = insertelement <2 x ptr> poison, ptr %a, i32 0
+ %splatofa = shufflevector <2 x ptr> %tmp, <2 x ptr> poison, <2 x i32> zeroinitializer
+ %gep = getelementptr i32, <2 x ptr> %splatofa, i64 %index
+ %res = extractelement <2 x ptr> %gep, i32 0
+ ret ptr %res
}
-define i32* @vector_splat_ptrs_nxv2i64_ext0(i32* %a, i64 %index) {
+define ptr @vector_splat_ptrs_nxv2i64_ext0(ptr %a, i64 %index) {
; CHECK-LABEL: @vector_splat_ptrs_nxv2i64_ext0(
-; CHECK-NEXT: [[RES:%.*]] = getelementptr i32, i32* [[A:%.*]], i64 [[INDEX:%.*]]
-; CHECK-NEXT: ret i32* [[RES]]
+; CHECK-NEXT: [[RES:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[INDEX:%.*]]
+; CHECK-NEXT: ret ptr [[RES]]
;
- %tmp = insertelement <vscale x 2 x i32*> poison, i32* %a, i32 0
- %splatofa = shufflevector <vscale x 2 x i32*> %tmp, <vscale x 2 x i32*> poison, <vscale x 2 x i32> zeroinitializer
- %gep = getelementptr i32, <vscale x 2 x i32*> %splatofa, i64 %index
- %res = extractelement <vscale x 2 x i32*> %gep, i32 0
- ret i32* %res
+ %tmp = insertelement <vscale x 2 x ptr> poison, ptr %a, i32 0
+ %splatofa = shufflevector <vscale x 2 x ptr> %tmp, <vscale x 2 x ptr> poison, <vscale x 2 x i32> zeroinitializer
+ %gep = getelementptr i32, <vscale x 2 x ptr> %splatofa, i64 %index
+ %res = extractelement <vscale x 2 x ptr> %gep, i32 0
+ ret ptr %res
}
-define float* @vector_struct1_splat_indices_v4i64_ext1({float, float}* %a) {
+define ptr @vector_struct1_splat_indices_v4i64_ext1(ptr %a) {
; CHECK-LABEL: @vector_struct1_splat_indices_v4i64_ext1(
-; CHECK-NEXT: [[RES:%.*]] = getelementptr { float, float }, { float, float }* [[A:%.*]], i64 4, i32 0
-; CHECK-NEXT: ret float* [[RES]]
+; CHECK-NEXT: [[RES:%.*]] = getelementptr { float, float }, ptr [[A:%.*]], i64 4, i32 0
+; CHECK-NEXT: ret ptr [[RES]]
;
- %gep = getelementptr {float, float}, {float, float}* %a, <4 x i32> <i32 4, i32 4, i32 4, i32 4>, i32 0
- %res = extractelement <4 x float*> %gep, i32 1
- ret float* %res
+ %gep = getelementptr {float, float}, ptr %a, <4 x i32> <i32 4, i32 4, i32 4, i32 4>, i32 0
+ %res = extractelement <4 x ptr> %gep, i32 1
+ ret ptr %res
}
-define float* @vector_struct2_splat_indices_v4i64_ext1({float, [8 x float]}* %a) {
+define ptr @vector_struct2_splat_indices_v4i64_ext1(ptr %a) {
; CHECK-LABEL: @vector_struct2_splat_indices_v4i64_ext1(
-; CHECK-NEXT: [[RES:%.*]] = getelementptr { float, [8 x float] }, { float, [8 x float] }* [[A:%.*]], i64 2, i32 1, i64 4
-; CHECK-NEXT: ret float* [[RES]]
+; CHECK-NEXT: [[RES:%.*]] = getelementptr { float, [8 x float] }, ptr [[A:%.*]], i64 2, i32 1, i64 4
+; CHECK-NEXT: ret ptr [[RES]]
;
- %gep = getelementptr {float, [8 x float]}, {float, [8 x float]}* %a, i32 2, i32 1, <4 x i32> <i32 4, i32 4, i32 4, i32 4>
- %res = extractelement <4 x float*> %gep, i32 1
- ret float* %res
+ %gep = getelementptr {float, [8 x float]}, ptr %a, i32 2, i32 1, <4 x i32> <i32 4, i32 4, i32 4, i32 4>
+ %res = extractelement <4 x ptr> %gep, i32 1
+ ret ptr %res
}
; Negative tests
-define i32* @vector_indices_nxv2i64_ext3(i32* %a, <vscale x 2 x i64> %indices) {
+define ptr @vector_indices_nxv2i64_ext3(ptr %a, <vscale x 2 x i64> %indices) {
; CHECK-LABEL: @vector_indices_nxv2i64_ext3(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, i32* [[A:%.*]], <vscale x 2 x i64> [[INDICES:%.*]]
-; CHECK-NEXT: [[RES:%.*]] = extractelement <vscale x 2 x i32*> [[GEP]], i64 3
-; CHECK-NEXT: ret i32* [[RES]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[A:%.*]], <vscale x 2 x i64> [[INDICES:%.*]]
+; CHECK-NEXT: [[RES:%.*]] = extractelement <vscale x 2 x ptr> [[GEP]], i64 3
+; CHECK-NEXT: ret ptr [[RES]]
;
- %gep = getelementptr i32, i32* %a, <vscale x 2 x i64> %indices
- %res = extractelement <vscale x 2 x i32*> %gep, i32 3
- ret i32* %res
+ %gep = getelementptr i32, ptr %a, <vscale x 2 x i64> %indices
+ %res = extractelement <vscale x 2 x ptr> %gep, i32 3
+ ret ptr %res
}
-define i32* @vector_indices_nxv2i64_extN(i32* %a, <vscale x 2 x i64> %indices, i32 %N) {
+define ptr @vector_indices_nxv2i64_extN(ptr %a, <vscale x 2 x i64> %indices, i32 %N) {
; CHECK-LABEL: @vector_indices_nxv2i64_extN(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, i32* [[A:%.*]], <vscale x 2 x i64> [[INDICES:%.*]]
-; CHECK-NEXT: [[RES:%.*]] = extractelement <vscale x 2 x i32*> [[GEP]], i32 [[N:%.*]]
-; CHECK-NEXT: ret i32* [[RES]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[A:%.*]], <vscale x 2 x i64> [[INDICES:%.*]]
+; CHECK-NEXT: [[RES:%.*]] = extractelement <vscale x 2 x ptr> [[GEP]], i32 [[N:%.*]]
+; CHECK-NEXT: ret ptr [[RES]]
;
- %gep = getelementptr i32, i32* %a, <vscale x 2 x i64> %indices
- %res = extractelement <vscale x 2 x i32*> %gep, i32 %N
- ret i32* %res
+ %gep = getelementptr i32, ptr %a, <vscale x 2 x i64> %indices
+ %res = extractelement <vscale x 2 x ptr> %gep, i32 %N
+ ret ptr %res
}
-define void @vector_indices_nxv2i64_mulitple_use(i32* %a, <vscale x 2 x i64> %indices, i32** %b, i32** %c) {
+define void @vector_indices_nxv2i64_mulitple_use(ptr %a, <vscale x 2 x i64> %indices, ptr %b, ptr %c) {
; CHECK-LABEL: @vector_indices_nxv2i64_mulitple_use(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, i32* [[A:%.*]], <vscale x 2 x i64> [[INDICES:%.*]]
-; CHECK-NEXT: [[LANE0:%.*]] = extractelement <vscale x 2 x i32*> [[GEP]], i64 0
-; CHECK-NEXT: [[LANE1:%.*]] = extractelement <vscale x 2 x i32*> [[GEP]], i64 1
-; CHECK-NEXT: store i32* [[LANE0]], i32** [[B:%.*]], align 8
-; CHECK-NEXT: store i32* [[LANE1]], i32** [[C:%.*]], align 8
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[A:%.*]], <vscale x 2 x i64> [[INDICES:%.*]]
+; CHECK-NEXT: [[LANE0:%.*]] = extractelement <vscale x 2 x ptr> [[GEP]], i64 0
+; CHECK-NEXT: [[LANE1:%.*]] = extractelement <vscale x 2 x ptr> [[GEP]], i64 1
+; CHECK-NEXT: store ptr [[LANE0]], ptr [[B:%.*]], align 8
+; CHECK-NEXT: store ptr [[LANE1]], ptr [[C:%.*]], align 8
; CHECK-NEXT: ret void
;
- %gep = getelementptr i32, i32* %a, <vscale x 2 x i64> %indices
- %lane0 = extractelement <vscale x 2 x i32*> %gep, i32 0
- %lane1 = extractelement <vscale x 2 x i32*> %gep, i32 1
- store i32* %lane0, i32** %b, align 8
- store i32* %lane1, i32** %c, align 8
+ %gep = getelementptr i32, ptr %a, <vscale x 2 x i64> %indices
+ %lane0 = extractelement <vscale x 2 x ptr> %gep, i32 0
+ %lane1 = extractelement <vscale x 2 x ptr> %gep, i32 1
+ store ptr %lane0, ptr %b, align 8
+ store ptr %lane1, ptr %c, align 8
ret void
}
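; With two live lanes the vector GEP is kept as is; scalarizing here would
; duplicate the address computation once per extracted lane rather than
; remove it.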
-define i32* @vector_ptrs_and_indices_ext0(<vscale x 4 x i32*> %a, <vscale x 4 x i64> %indices) {
+define ptr @vector_ptrs_and_indices_ext0(<vscale x 4 x ptr> %a, <vscale x 4 x i64> %indices) {
; CHECK-LABEL: @vector_ptrs_and_indices_ext0(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, <vscale x 4 x i32*> [[A:%.*]], <vscale x 4 x i64> [[INDICES:%.*]]
-; CHECK-NEXT: [[RES:%.*]] = extractelement <vscale x 4 x i32*> [[GEP]], i64 0
-; CHECK-NEXT: ret i32* [[RES]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, <vscale x 4 x ptr> [[A:%.*]], <vscale x 4 x i64> [[INDICES:%.*]]
+; CHECK-NEXT: [[RES:%.*]] = extractelement <vscale x 4 x ptr> [[GEP]], i64 0
+; CHECK-NEXT: ret ptr [[RES]]
;
- %gep = getelementptr i32, <vscale x 4 x i32*> %a, <vscale x 4 x i64> %indices
- %res = extractelement <vscale x 4 x i32*> %gep, i32 0
- ret i32* %res
+ %gep = getelementptr i32, <vscale x 4 x ptr> %a, <vscale x 4 x i64> %indices
+ %res = extractelement <vscale x 4 x ptr> %gep, i32 0
+ ret ptr %res
}
@buffer = external global [64 x float]
-declare void @use(i8*)
+declare void @use(ptr)
define void @f() {
- call void @use(i8* getelementptr (i8, i8* getelementptr (i8, i8* bitcast ([64 x float]* @buffer to i8*), i64 and (i64 sub (i64 0, i64 ptrtoint ([64 x float]* @buffer to i64)), i64 63)), i64 64))
+ call void @use(ptr getelementptr (i8, ptr getelementptr (i8, ptr @buffer, i64 and (i64 sub (i64 0, i64 ptrtoint (ptr @buffer to i64)), i64 63)), i64 64))
ret void
}
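; A sketch of the constant arithmetic above: "and (sub (i64 0, ptrtoint
; @buffer), 63)" computes (-(uintptr_t)buffer) & 63, the byte distance from
; @buffer up to the next 64-byte boundary, and the outer "+ 64" then steps one
; further 64-byte block in.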
%struct.matrix_float3x3 = type { [3 x <3 x float>] }
@matrix_identity_float3x3 = external global %struct.matrix_float3x3, align 16
-@bbb = global float* getelementptr inbounds (%struct.matrix_float3x3, %struct.matrix_float3x3* @matrix_identity_float3x3, i64 0, i32 0, i64 1, i64 3)
-; CHECK: @bbb = global float* getelementptr inbounds (%struct.matrix_float3x3, %struct.matrix_float3x3* @matrix_identity_float3x3, i64 0, i32 0, i64 1, i64 3)
+@bbb = global ptr getelementptr inbounds (%struct.matrix_float3x3, ptr @matrix_identity_float3x3, i64 0, i32 0, i64 1, i64 3)
+; CHECK: @bbb = global ptr getelementptr inbounds (%struct.matrix_float3x3, ptr @matrix_identity_float3x3, i64 0, i32 0, i64 1, i64 3)
define i8 @constantexpr(i8 %or) local_unnamed_addr #0 {
; CHECK-LABEL: @constantexpr(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[R:%.*]] = xor i8 [[OR:%.*]], xor (i8 ptrtoint (i32* @global_constant to i8), i8 ptrtoint (i32* @global_constant2 to i8))
+; CHECK-NEXT: [[R:%.*]] = xor i8 [[OR:%.*]], xor (i8 ptrtoint (ptr @global_constant to i8), i8 ptrtoint (ptr @global_constant2 to i8))
; CHECK-NEXT: ret i8 [[R]]
;
entry:
- %r = xor i8 %or, xor (i8 xor (i8 ptrtoint (i32* @global_constant to i8), i8 -1), i8 xor (i8 ptrtoint (i32* @global_constant2 to i8), i8 -1))
+ %r = xor i8 %or, xor (i8 xor (i8 ptrtoint (ptr @global_constant to i8), i8 -1), i8 xor (i8 ptrtoint (ptr @global_constant2 to i8), i8 -1))
ret i8 %r
}
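; Worked form of the fold above: (a ^ -1) ^ (b ^ -1) == ~a ^ ~b == a ^ b, so
; the two "not"s cancel and only the xor of the two ptrtoint values survives
; in the CHECK line.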
@global_constant3 = external global [6 x [1 x i64]], align 1
@global_constant4 = external global i64, align 1
-@global_constant5 = external global i16*, align 1
+@global_constant5 = external global ptr, align 1
define i16 @constantexpr2() {
; CHECK-LABEL: @constantexpr2(
-; CHECK-NEXT: [[I2:%.*]] = load i16*, i16** @global_constant5, align 1
-; CHECK-NEXT: [[I3:%.*]] = load i16, i16* [[I2]], align 1
-; CHECK-NEXT: [[I5:%.*]] = xor i16 [[I3]], xor (i16 zext (i1 icmp ne (i64* getelementptr inbounds ([6 x [1 x i64]], [6 x [1 x i64]]* @global_constant3, i64 0, i64 5, i64 0), i64* @global_constant4) to i16), i16 -1)
+; CHECK-NEXT: [[I2:%.*]] = load ptr, ptr @global_constant5, align 1
+; CHECK-NEXT: [[I3:%.*]] = load i16, ptr [[I2]], align 1
+; CHECK-NEXT: [[I5:%.*]] = xor i16 [[I3]], xor (i16 zext (i1 icmp ne (ptr getelementptr inbounds ([6 x [1 x i64]], ptr @global_constant3, i64 0, i64 5, i64 0), ptr @global_constant4) to i16), i16 -1)
; CHECK-NEXT: ret i16 [[I5]]
;
- %i0 = icmp ne i64* getelementptr inbounds ([6 x [1 x i64]], [6 x [1 x i64]]* @global_constant3, i16 0, i16 5, i16 0), @global_constant4
+ %i0 = icmp ne ptr getelementptr inbounds ([6 x [1 x i64]], ptr @global_constant3, i16 0, i16 5, i16 0), @global_constant4
%i1 = zext i1 %i0 to i16
- %i2 = load i16*, i16** @global_constant5, align 1
- %i3 = load i16, i16* %i2, align 1
+ %i2 = load ptr, ptr @global_constant5, align 1
+ %i3 = load i16, ptr %i2, align 1
%i4 = xor i16 %i3, %i1
%i5 = xor i16 %i4, -1
ret i16 %i5
ret <2 x i1> %c
}
-define i1 @sum_ugt_op_uses(i8 %p1, i8 %p2, i8* %p3) {
+define i1 @sum_ugt_op_uses(i8 %p1, i8 %p2, ptr %p3) {
; CHECK-LABEL: @sum_ugt_op_uses(
; CHECK-NEXT: [[X:%.*]] = sdiv i8 42, [[P1:%.*]]
; CHECK-NEXT: [[Y:%.*]] = sdiv i8 42, [[P2:%.*]]
; CHECK-NEXT: [[A:%.*]] = add nsw i8 [[X]], [[Y]]
-; CHECK-NEXT: store i8 [[A]], i8* [[P3:%.*]], align 1
+; CHECK-NEXT: store i8 [[A]], ptr [[P3:%.*]], align 1
; CHECK-NEXT: [[C:%.*]] = icmp ugt i8 [[X]], [[A]]
; CHECK-NEXT: ret i1 [[C]]
;
%x = sdiv i8 42, %p1
%y = sdiv i8 42, %p2
%a = add i8 %x, %y
- store i8 %a, i8* %p3
+ store i8 %a, ptr %p3
%c = icmp ugt i8 %x, %a
ret i1 %c
}
ret i1 %c
}
-define i1 @sum_ult_op_uses(i8 %x, i8 %y, i8* %p) {
+define i1 @sum_ult_op_uses(i8 %x, i8 %y, ptr %p) {
; CHECK-LABEL: @sum_ult_op_uses(
; CHECK-NEXT: [[A:%.*]] = add i8 [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT: store i8 [[A]], i8* [[P:%.*]], align 1
+; CHECK-NEXT: store i8 [[A]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[C:%.*]] = icmp ult i8 [[A]], [[X]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = add i8 %y, %x
- store i8 %a, i8* %p
+ store i8 %a, ptr %p
%c = icmp ult i8 %a, %x
ret i1 %c
}
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-declare i32 @f32(i32**, i32**)
+declare i32 @f32(ptr, ptr)
-declare i32 @f64(i64**, i64**)
+declare i32 @f64(ptr, ptr)
define i1 @icmp_func() {
; CHECK-LABEL: @icmp_func(
; CHECK: ret i1 false
- %cmp = icmp eq i32 (i8*, i8*)* bitcast (i32 (i32**, i32**)* @f32 to i32 (i8*, i8*)*), bitcast (i32 (i64**, i64**)* @f64 to i32 (i8*, i8*)*)
+ %cmp = icmp eq ptr @f32, @f64
ret i1 %cmp
}
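; @f32 and @f64 are distinct functions, assumed to have distinct addresses, so
; the equality compare folds to false; with opaque pointers no bitcast of the
; function signatures is needed to form the compare.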
-define i1 @icmp_fptr(i32 (i8*, i8*)*) {
+define i1 @icmp_fptr(ptr) {
; CHECK-LABEL: @icmp_fptr(
-; CHECK: %cmp = icmp ne i32 (i8*, i8*)* %0, bitcast (i32 (i32**, i32**)* @f32 to i32 (i8*, i8*)*)
+; CHECK: %cmp = icmp ne ptr %0, @f32
; CHECK: ret i1 %cmp
- %cmp = icmp ne i32 (i8*, i8*)* bitcast (i32 (i32**, i32**)* @f32 to i32 (i8*, i8*)*), %0
+ %cmp = icmp ne ptr @f32, %0
ret i1 %cmp
}
; CHECK-LABEL: define i32 @icmp_glob(i32 %x, i32 %y)
; CHECK-NEXT: ret i32 %y
;
- %sel = select i1 icmp eq (i32* bitcast (i32 (i32, i32)* @icmp_glob to i32*), i32* @b), i32 %x, i32 %y
+ %sel = select i1 icmp eq (ptr @icmp_glob, ptr @b), i32 %x, i32 %y
ret i32 %sel
}
; This used to loop infinitely because of a conflict
; with min/max canonicalization.
-define i32 @PR48900(i32 %i, i1* %p) {
+define i32 @PR48900(i32 %i, ptr %p) {
; CHECK-LABEL: @PR48900(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.umax.i32(i32 [[I:%.*]], i32 1)
; CHECK-NEXT: [[I4:%.*]] = icmp sgt i32 [[TMP1]], 0
; This used to loop infinitely because of a conflict
; with min/max canonicalization.
-define i8 @PR48900_alt(i8 %i, i1* %p) {
+define i8 @PR48900_alt(i8 %i, ptr %p) {
; CHECK-LABEL: @PR48900_alt(
; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.smax.i8(i8 [[I:%.*]], i8 -127)
; CHECK-NEXT: [[I4:%.*]] = icmp ugt i8 [[TMP1]], -128
; CHECK: if.then9:
; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[BETH:%.*]] to i32
; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[CONV]], [[CONV]]
-; CHECK-NEXT: [[TINKY:%.*]] = load i16, i16* @glob, align 2
+; CHECK-NEXT: [[TINKY:%.*]] = load i16, ptr @glob, align 2
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[MUL]] to i16
; CHECK-NEXT: [[CONV14:%.*]] = and i16 [[TINKY]], [[TMP1]]
-; CHECK-NEXT: store i16 [[CONV14]], i16* @glob, align 2
+; CHECK-NEXT: store i16 [[CONV14]], ptr @glob, align 2
; CHECK-NEXT: ret void
;
%conv = zext i8 %beth to i32
br i1 %tobool8, label %if.then9, label %if.then9
if.then9:
- %tinky = load i16, i16* @glob
+ %tinky = load i16, ptr @glob
%conv13 = sext i16 %tinky to i32
%and = and i32 %mul, %conv13
%conv14 = trunc i32 %and to i16
- store i16 %conv14, i16* @glob
+ store i16 %conv14, ptr @glob
ret void
}
define i1 @oss_fuzz_39934(i32 %arg) {
; CHECK-LABEL: @oss_fuzz_39934(
; CHECK-NEXT: [[B13:%.*]] = mul nsw i32 [[ARG:%.*]], -65536
-; CHECK-NEXT: [[C10:%.*]] = icmp ne i32 [[B13]], mul (i32 or (i32 zext (i1 icmp eq (i32* @g, i32* null) to i32), i32 65537), i32 -65536)
+; CHECK-NEXT: [[C10:%.*]] = icmp ne i32 [[B13]], mul (i32 or (i32 zext (i1 icmp eq (ptr @g, ptr null) to i32), i32 65537), i32 -65536)
; CHECK-NEXT: ret i1 [[C10]]
;
%B13 = mul nsw i32 %arg, -65536
- %C10 = icmp ne i32 mul (i32 or (i32 zext (i1 icmp eq (i32* @g, i32* null) to i32), i32 65537), i32 -65536), %B13
+ %C10 = icmp ne i32 mul (i32 or (i32 zext (i1 icmp eq (ptr @g, ptr null) to i32), i32 65537), i32 -65536), %B13
ret i1 %C10
}
declare void @use_vec(<2 x i8>)
; Definitely out of range
-define i1 @test_nonzero(i32* nocapture readonly %arg) {
+define i1 @test_nonzero(ptr nocapture readonly %arg) {
; CHECK-LABEL: @test_nonzero(
; CHECK-NEXT: ret i1 true
;
- %val = load i32, i32* %arg, !range !0
+ %val = load i32, ptr %arg, !range !0
%rval = icmp ne i32 %val, 0
ret i1 %rval
}
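; (!0 is defined at the end of the file; a range like !{i32 1, i32 6} would be
; consistent with all the folds below: every value in [1, 6) is nonzero, not
; equal to 6, strictly positive, and at most 6.)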
-define i1 @test_nonzero2(i32* nocapture readonly %arg) {
+define i1 @test_nonzero2(ptr nocapture readonly %arg) {
; CHECK-LABEL: @test_nonzero2(
; CHECK-NEXT: ret i1 false
;
- %val = load i32, i32* %arg, !range !0
+ %val = load i32, ptr %arg, !range !0
%rval = icmp eq i32 %val, 0
ret i1 %rval
}
; Potentially in range
-define i1 @test_nonzero3(i32* nocapture readonly %arg) {
+define i1 @test_nonzero3(ptr nocapture readonly %arg) {
; CHECK-LABEL: @test_nonzero3(
-; CHECK-NEXT: [[VAL:%.*]] = load i32, i32* [[ARG:%.*]], align 4, !range [[RNG0:![0-9]+]]
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ARG:%.*]], align 4, !range [[RNG0:![0-9]+]]
; CHECK-NEXT: [[RVAL:%.*]] = icmp ne i32 [[VAL]], 0
; CHECK-NEXT: ret i1 [[RVAL]]
;
; Check that this does not trigger - it wouldn't be legal
- %val = load i32, i32* %arg, !range !1
+ %val = load i32, ptr %arg, !range !1
%rval = icmp ne i32 %val, 0
ret i1 %rval
}
; Definitely in range
-define i1 @test_nonzero4(i8* nocapture readonly %arg) {
+define i1 @test_nonzero4(ptr nocapture readonly %arg) {
; CHECK-LABEL: @test_nonzero4(
; CHECK-NEXT: ret i1 false
;
- %val = load i8, i8* %arg, !range !2
+ %val = load i8, ptr %arg, !range !2
%rval = icmp ne i8 %val, 0
ret i1 %rval
}
-define i1 @test_nonzero5(i8* nocapture readonly %arg) {
+define i1 @test_nonzero5(ptr nocapture readonly %arg) {
; CHECK-LABEL: @test_nonzero5(
; CHECK-NEXT: ret i1 false
;
- %val = load i8, i8* %arg, !range !2
+ %val = load i8, ptr %arg, !range !2
%rval = icmp ugt i8 %val, 0
ret i1 %rval
}
; Cheaper checks (most values in range meet requirements)
-define i1 @test_nonzero6(i8* %argw) {
+define i1 @test_nonzero6(ptr %argw) {
; CHECK-LABEL: @test_nonzero6(
-; CHECK-NEXT: [[VAL:%.*]] = load i8, i8* [[ARGW:%.*]], align 1, !range [[RNG1:![0-9]+]]
+; CHECK-NEXT: [[VAL:%.*]] = load i8, ptr [[ARGW:%.*]], align 1, !range [[RNG1:![0-9]+]]
; CHECK-NEXT: [[RVAL:%.*]] = icmp ne i8 [[VAL]], 0
; CHECK-NEXT: ret i1 [[RVAL]]
;
- %val = load i8, i8* %argw, !range !3
+ %val = load i8, ptr %argw, !range !3
%rval = icmp sgt i8 %val, 0
ret i1 %rval
}
; Constant not in range, should return true.
-define i1 @test_not_in_range(i32* nocapture readonly %arg) {
+define i1 @test_not_in_range(ptr nocapture readonly %arg) {
; CHECK-LABEL: @test_not_in_range(
; CHECK-NEXT: ret i1 true
;
- %val = load i32, i32* %arg, !range !0
+ %val = load i32, ptr %arg, !range !0
%rval = icmp ne i32 %val, 6
ret i1 %rval
}
; Constant in range, cannot fold.
-define i1 @test_in_range(i32* nocapture readonly %arg) {
+define i1 @test_in_range(ptr nocapture readonly %arg) {
; CHECK-LABEL: @test_in_range(
-; CHECK-NEXT: [[VAL:%.*]] = load i32, i32* [[ARG:%.*]], align 4, !range [[RNG2:![0-9]+]]
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ARG:%.*]], align 4, !range [[RNG2:![0-9]+]]
; CHECK-NEXT: [[RVAL:%.*]] = icmp ne i32 [[VAL]], 3
; CHECK-NEXT: ret i1 [[RVAL]]
;
- %val = load i32, i32* %arg, !range !0
+ %val = load i32, ptr %arg, !range !0
%rval = icmp ne i32 %val, 3
ret i1 %rval
}
; Values in range greater than constant.
-define i1 @test_range_sgt_constant(i32* nocapture readonly %arg) {
+define i1 @test_range_sgt_constant(ptr nocapture readonly %arg) {
; CHECK-LABEL: @test_range_sgt_constant(
; CHECK-NEXT: ret i1 true
;
- %val = load i32, i32* %arg, !range !0
+ %val = load i32, ptr %arg, !range !0
%rval = icmp sgt i32 %val, 0
ret i1 %rval
}
; Values in range less than constant.
-define i1 @test_range_slt_constant(i32* nocapture readonly %arg) {
+define i1 @test_range_slt_constant(ptr nocapture readonly %arg) {
; CHECK-LABEL: @test_range_slt_constant(
; CHECK-NEXT: ret i1 false
;
- %val = load i32, i32* %arg, !range !0
+ %val = load i32, ptr %arg, !range !0
%rval = icmp sgt i32 %val, 6
ret i1 %rval
}
; Values in the union of multiple sub-ranges are never equal to the constant.
-define i1 @test_multi_range1(i32* nocapture readonly %arg) {
+define i1 @test_multi_range1(ptr nocapture readonly %arg) {
; CHECK-LABEL: @test_multi_range1(
; CHECK-NEXT: ret i1 true
;
- %val = load i32, i32* %arg, !range !4
+ %val = load i32, ptr %arg, !range !4
%rval = icmp ne i32 %val, 0
ret i1 %rval
}
; The union of the sub-ranges could possibly equal the constant. In theory
; this could also be folded, and might be implemented in the future if shown
; profitable in practice.
-define i1 @test_multi_range2(i32* nocapture readonly %arg) {
+define i1 @test_multi_range2(ptr nocapture readonly %arg) {
; CHECK-LABEL: @test_multi_range2(
-; CHECK-NEXT: [[VAL:%.*]] = load i32, i32* [[ARG:%.*]], align 4, !range [[RNG3:![0-9]+]]
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ARG:%.*]], align 4, !range [[RNG3:![0-9]+]]
; CHECK-NEXT: [[RVAL:%.*]] = icmp ne i32 [[VAL]], 7
; CHECK-NEXT: ret i1 [[RVAL]]
;
- %val = load i32, i32* %arg, !range !4
+ %val = load i32, ptr %arg, !range !4
%rval = icmp ne i32 %val, 7
ret i1 %rval
}
; Values' ranges overlap each other, so it cannot be simplified.
-define i1 @test_two_ranges(i32* nocapture readonly %arg1, i32* nocapture readonly %arg2) {
+define i1 @test_two_ranges(ptr nocapture readonly %arg1, ptr nocapture readonly %arg2) {
; CHECK-LABEL: @test_two_ranges(
-; CHECK-NEXT: [[VAL1:%.*]] = load i32, i32* [[ARG1:%.*]], align 4, !range [[RNG4:![0-9]+]]
-; CHECK-NEXT: [[VAL2:%.*]] = load i32, i32* [[ARG2:%.*]], align 4, !range [[RNG5:![0-9]+]]
+; CHECK-NEXT: [[VAL1:%.*]] = load i32, ptr [[ARG1:%.*]], align 4, !range [[RNG4:![0-9]+]]
+; CHECK-NEXT: [[VAL2:%.*]] = load i32, ptr [[ARG2:%.*]], align 4, !range [[RNG5:![0-9]+]]
; CHECK-NEXT: [[RVAL:%.*]] = icmp ult i32 [[VAL2]], [[VAL1]]
; CHECK-NEXT: ret i1 [[RVAL]]
;
- %val1 = load i32, i32* %arg1, !range !5
- %val2 = load i32, i32* %arg2, !range !6
+ %val1 = load i32, ptr %arg1, !range !5
+ %val2 = load i32, ptr %arg2, !range !6
%rval = icmp ult i32 %val2, %val1
ret i1 %rval
}
; Values' ranges do not overlap each other, so it can be simplified to false.
-define i1 @test_two_ranges2(i32* nocapture readonly %arg1, i32* nocapture readonly %arg2) {
+define i1 @test_two_ranges2(ptr nocapture readonly %arg1, ptr nocapture readonly %arg2) {
; CHECK-LABEL: @test_two_ranges2(
; CHECK-NEXT: ret i1 false
;
- %val1 = load i32, i32* %arg1, !range !0
- %val2 = load i32, i32* %arg2, !range !6
+ %val1 = load i32, ptr %arg1, !range !0
+ %val2 = load i32, ptr %arg2, !range !6
%rval = icmp ult i32 %val2, %val1
ret i1 %rval
}
; Values' ranges do not overlap each other, so it can be simplified to true.
-define i1 @test_two_ranges3(i32* nocapture readonly %arg1, i32* nocapture readonly %arg2) {
+define i1 @test_two_ranges3(ptr nocapture readonly %arg1, ptr nocapture readonly %arg2) {
; CHECK-LABEL: @test_two_ranges3(
; CHECK-NEXT: ret i1 true
;
- %val1 = load i32, i32* %arg1, !range !0
- %val2 = load i32, i32* %arg2, !range !6
+ %val1 = load i32, ptr %arg1, !range !0
+ %val2 = load i32, ptr %arg2, !range !6
%rval = icmp ugt i32 %val2, %val1
ret i1 %rval
}
ret i1 %c
}
-define i1 @ashr_00_00_ashr_extra_use(i8 %x, i8* %ptr) {
+define i1 @ashr_00_00_ashr_extra_use(i8 %x, ptr %ptr) {
; CHECK-LABEL: @ashr_00_00_ashr_extra_use(
; CHECK-NEXT: [[S:%.*]] = ashr exact i8 [[X:%.*]], 3
; CHECK-NEXT: [[C:%.*]] = icmp ult i8 [[X]], 88
-; CHECK-NEXT: store i8 [[S]], i8* [[PTR:%.*]], align 1
+; CHECK-NEXT: store i8 [[S]], ptr [[PTR:%.*]], align 1
; CHECK-NEXT: ret i1 [[C]]
;
%s = ashr exact i8 %x, 3
%c = icmp ule i8 %s, 10
- store i8 %s, i8* %ptr
+ store i8 %s, ptr %ptr
ret i1 %c
}
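; Worked form: "ashr exact i8 %x, 3" asserts %x == %s * 8 with no bits lost,
; so on that domain "%s u<= 10" is equivalent to "%x u<= 80", which InstCombine
; expresses as the strict "%x u< 88" (88 being the next multiple of 8).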
ret <2 x i1> %r
}
-define i32 @sub_eq_zero_select(i32 %a, i32 %b, i32* %p) {
+define i32 @sub_eq_zero_select(i32 %a, i32 %b, ptr %p) {
; CHECK-LABEL: @sub_eq_zero_select(
; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: store i32 [[SUB]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[SUB]], ptr [[P:%.*]], align 4
; CHECK-NEXT: ret i32 [[B]]
;
%sub = sub i32 %a, %b
- store i32 %sub, i32* %p
+ store i32 %sub, ptr %p
%cmp = icmp eq i32 %sub, 0
%sel = select i1 %cmp, i32 %a, i32 %b
ret i32 %sel
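; The select folds to %b because the two arms agree whenever %a is chosen:
; %sub == 0 implies %a == %b, so selecting %a in that case is the same as
; returning %b unconditionally.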
; CHECK-NEXT: ret i1 true
;
%idxprom = sext i32 %x to i64
- %idx = getelementptr inbounds [3 x i32], [3 x i32]* @a, i64 0, i64 %idxprom
- %t1 = load i32, i32* %idx, align 4
+ %idx = getelementptr inbounds [3 x i32], ptr @a, i64 0, i64 %idxprom
+ %t1 = load i32, ptr %idx, align 4
%conv1 = lshr i32 %t1, 1
%t2 = trunc i32 %conv1 to i8
%conv2 = and i8 %t2, 127
define <2 x i1> @PR27786(<2 x i8> %a) {
; CHECK-LABEL: @PR27786(
-; CHECK-NEXT: [[CMP:%.*]] = icmp sle <2 x i8> [[A:%.*]], bitcast (i16 ptrtoint (i32* @someglobal to i16) to <2 x i8>)
+; CHECK-NEXT: [[CMP:%.*]] = icmp sle <2 x i8> [[A:%.*]], bitcast (i16 ptrtoint (ptr @someglobal to i16) to <2 x i8>)
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
- %cmp = icmp sle <2 x i8> %a, bitcast (i16 ptrtoint (i32* @someglobal to i16) to <2 x i8>)
+ %cmp = icmp sle <2 x i8> %a, bitcast (i16 ptrtoint (ptr @someglobal to i16) to <2 x i8>)
ret <2 x i1> %cmp
}
ret i1 %r
}
-define i1 @not_cast_ne-1_uses(<3 x i2> %x, <3 x i2>* %p) {
+define i1 @not_cast_ne-1_uses(<3 x i2> %x, ptr %p) {
; CHECK-LABEL: @not_cast_ne-1_uses(
; CHECK-NEXT: [[NOT:%.*]] = xor <3 x i2> [[X:%.*]], <i2 -1, i2 -1, i2 -1>
-; CHECK-NEXT: store <3 x i2> [[NOT]], <3 x i2>* [[P:%.*]], align 1
+; CHECK-NEXT: store <3 x i2> [[NOT]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <3 x i2> [[X]] to i6
; CHECK-NEXT: [[R:%.*]] = icmp ne i6 [[TMP1]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%not = xor <3 x i2> %x, <i2 -1, i2 -1, i2 -1>
- store <3 x i2> %not, <3 x i2>* %p
+ store <3 x i2> %not, ptr %p
%b = bitcast <3 x i2> %not to i6
%r = icmp ne i6 %b, -1
ret i1 %r
; negative test - extra use
-define i1 @eq_cast_eq-1_use1(<2 x i4> %x, <2 x i4> %y, <2 x i1>* %p) {
+define i1 @eq_cast_eq-1_use1(<2 x i4> %x, <2 x i4> %y, ptr %p) {
; CHECK-LABEL: @eq_cast_eq-1_use1(
; CHECK-NEXT: [[IC:%.*]] = icmp sgt <2 x i4> [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: store <2 x i1> [[IC]], <2 x i1>* [[P:%.*]], align 1
+; CHECK-NEXT: store <2 x i1> [[IC]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[B:%.*]] = bitcast <2 x i1> [[IC]] to i2
; CHECK-NEXT: [[R:%.*]] = icmp eq i2 [[B]], -1
; CHECK-NEXT: ret i1 [[R]]
;
%ic = icmp sgt <2 x i4> %x, %y
- store <2 x i1> %ic, <2 x i1>* %p
+ store <2 x i1> %ic, ptr %p
%b = bitcast <2 x i1> %ic to i2
%r = icmp eq i2 %b, -1
ret i1 %r
; negative test - extra use
-define i1 @eq_cast_eq-1_use2(<2 x i4> %x, <2 x i4> %y, i2* %p) {
+define i1 @eq_cast_eq-1_use2(<2 x i4> %x, <2 x i4> %y, ptr %p) {
; CHECK-LABEL: @eq_cast_eq-1_use2(
; CHECK-NEXT: [[IC:%.*]] = icmp sgt <2 x i4> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[B:%.*]] = bitcast <2 x i1> [[IC]] to i2
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i2* [[P:%.*]] to <2 x i1>*
-; CHECK-NEXT: store <2 x i1> [[IC]], <2 x i1>* [[TMP1]], align 1
+; CHECK-NEXT: store <2 x i1> [[IC]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[R:%.*]] = icmp eq i2 [[B]], -1
; CHECK-NEXT: ret i1 [[R]]
;
%ic = icmp sgt <2 x i4> %x, %y
%b = bitcast <2 x i1> %ic to i2
- store i2 %b, i2* %p
+ store i2 %b, ptr %p
%r = icmp eq i2 %b, -1
ret i1 %r
}
; extra use of extend is ok
-define i1 @eq_cast_zext_use1(<5 x i3> %b, <5 x i7>* %p) {
+define i1 @eq_cast_zext_use1(<5 x i3> %b, ptr %p) {
; CHECK-LABEL: @eq_cast_zext_use1(
; CHECK-NEXT: [[E:%.*]] = zext <5 x i3> [[B:%.*]] to <5 x i7>
-; CHECK-NEXT: store <5 x i7> [[E]], <5 x i7>* [[P:%.*]], align 8
+; CHECK-NEXT: store <5 x i7> [[E]], ptr [[P:%.*]], align 8
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <5 x i3> [[B]] to i15
; CHECK-NEXT: [[R:%.*]] = icmp eq i15 [[TMP1]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%e = zext <5 x i3> %b to <5 x i7>
- store <5 x i7> %e, <5 x i7>* %p
+ store <5 x i7> %e, ptr %p
%bc = bitcast <5 x i7> %e to i35
%r = icmp eq i35 %bc, 0
ret i1 %r
ret i1 %r
}
-define i1 @eq_cast_eq_ptr-1(<2 x i4*> %x, <2 x i4*> %y) {
+define i1 @eq_cast_eq_ptr-1(<2 x ptr> %x, <2 x ptr> %y) {
; CHECK-LABEL: @eq_cast_eq_ptr-1(
-; CHECK-NEXT: [[IC:%.*]] = icmp ne <2 x i4*> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[IC:%.*]] = icmp ne <2 x ptr> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i1> [[IC]] to i2
; CHECK-NEXT: [[R:%.*]] = icmp eq i2 [[TMP1]], 0
; CHECK-NEXT: ret i1 [[R]]
;
- %ic = icmp eq <2 x i4*> %x, %y
+ %ic = icmp eq <2 x ptr> %x, %y
%b = bitcast <2 x i1> %ic to i2
%r = icmp eq i2 %b, -1
ret i1 %r
}
-define i1 @eq_cast_ne_ptr-1(<2 x i4*> %x, <2 x i4*> %y) {
+define i1 @eq_cast_ne_ptr-1(<2 x ptr> %x, <2 x ptr> %y) {
; CHECK-LABEL: @eq_cast_ne_ptr-1(
-; CHECK-NEXT: [[IC:%.*]] = icmp ne <2 x i4*> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[IC:%.*]] = icmp ne <2 x ptr> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i1> [[IC]] to i2
; CHECK-NEXT: [[R:%.*]] = icmp ne i2 [[TMP1]], 0
; CHECK-NEXT: ret i1 [[R]]
;
- %ic = icmp eq <2 x i4*> %x, %y
+ %ic = icmp eq <2 x ptr> %x, %y
%b = bitcast <2 x i1> %ic to i2
%r = icmp ne i2 %b, -1
ret i1 %r
; Note: offs can be negative; LLVM used to make the incorrect assumption that
; unsigned overflow does not happen during offset computation.
-define i1 @test24_neg_offs(i32* %p, i64 %offs) {
+define i1 @test24_neg_offs(ptr %p, i64 %offs) {
; CHECK-LABEL: @test24_neg_offs(
; CHECK-NEXT: [[P1_IDX_NEG:%.*]] = mul i64 [[OFFS:%.*]], -4
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[P1_IDX_NEG]], 8
; CHECK-NEXT: ret i1 [[CMP]]
;
- %p1 = getelementptr inbounds i32, i32* %p, i64 %offs
- %conv1 = ptrtoint i32* %p to i64
- %conv2 = ptrtoint i32* %p1 to i64
+ %p1 = getelementptr inbounds i32, ptr %p, i64 %offs
+ %conv1 = ptrtoint ptr %p to i64
+ %conv2 = ptrtoint ptr %p1 to i64
%delta = sub i64 %conv1, %conv2
%cmp = icmp eq i64 %delta, 8
ret i1 %cmp
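; Worked form of the CHECK lines above: with 4-byte i32 elements,
; p - (p + 4*offs) == -4*offs, so the pointer-difference compare becomes
; "mul i64 %offs, -4 == 8", i.e. %offs == -2.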
declare i32 @test58_d(i64)
; Negative test: GEP inbounds may cross sign boundary.
-define i1 @test62(i8* %a) {
+define i1 @test62(ptr %a) {
; CHECK-LABEL: @test62(
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[A:%.*]], i64 1
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, i8* [[A]], i64 10
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8* [[ARRAYIDX1]], [[ARRAYIDX2]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 1
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 10
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt ptr [[ARRAYIDX1]], [[ARRAYIDX2]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %arrayidx1 = getelementptr inbounds i8, i8* %a, i64 1
- %arrayidx2 = getelementptr inbounds i8, i8* %a, i64 10
- %cmp = icmp slt i8* %arrayidx1, %arrayidx2
+ %arrayidx1 = getelementptr inbounds i8, ptr %a, i64 1
+ %arrayidx2 = getelementptr inbounds i8, ptr %a, i64 10
+ %cmp = icmp slt ptr %arrayidx1, %arrayidx2
ret i1 %cmp
}
-define i1 @test62_as1(i8 addrspace(1)* %a) {
+define i1 @test62_as1(ptr addrspace(1) %a) {
; CHECK-LABEL: @test62_as1(
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8 addrspace(1)* [[A:%.*]], i16 1
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, i8 addrspace(1)* [[A]], i16 10
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 addrspace(1)* [[ARRAYIDX1]], [[ARRAYIDX2]]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[A:%.*]], i16 1
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[A]], i16 10
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt ptr addrspace(1) [[ARRAYIDX1]], [[ARRAYIDX2]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %arrayidx1 = getelementptr inbounds i8, i8 addrspace(1)* %a, i64 1
- %arrayidx2 = getelementptr inbounds i8, i8 addrspace(1)* %a, i64 10
- %cmp = icmp slt i8 addrspace(1)* %arrayidx1, %arrayidx2
+ %arrayidx1 = getelementptr inbounds i8, ptr addrspace(1) %a, i64 1
+ %arrayidx2 = getelementptr inbounds i8, ptr addrspace(1) %a, i64 10
+ %cmp = icmp slt ptr addrspace(1) %arrayidx1, %arrayidx2
ret i1 %cmp
}
ret <2 x i1> %r
}
-define i1 @xor_ult_extra_use(i8 %x, i8* %p) {
+define i1 @xor_ult_extra_use(i8 %x, ptr %p) {
; CHECK-LABEL: @xor_ult_extra_use(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -32
-; CHECK-NEXT: store i8 [[XOR]], i8* [[P:%.*]], align 1
+; CHECK-NEXT: store i8 [[XOR]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X]], 31
; CHECK-NEXT: ret i1 [[R]]
;
%xor = xor i8 %x, -32
- store i8 %xor, i8* %p
+ store i8 %xor, ptr %p
%r = icmp ult i8 %xor, -32
ret i1 %r
}
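; Worked form: xor with -32 (0xE0) flips the top three bits of %x, and the
; result is u< 0xE0 exactly when at least one of those bits was set in %x,
; i.e. when %x u> 31.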
ret <2 x i1> %r
}
-define i1 @xor_ugt_extra_use(i8 %x, i8* %p) {
+define i1 @xor_ugt_extra_use(i8 %x, ptr %p) {
; CHECK-LABEL: @xor_ugt_extra_use(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], 63
-; CHECK-NEXT: store i8 [[XOR]], i8* [[P:%.*]], align 1
+; CHECK-NEXT: store i8 [[XOR]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X]], 63
; CHECK-NEXT: ret i1 [[R]]
;
%xor = xor i8 %x, 63
- store i8 %xor, i8* %p
+ store i8 %xor, ptr %p
%r = icmp ugt i8 %xor, 63
ret i1 %r
}
ret i1 %cmp
}
-define zeroext i1 @icmp_cmpxchg_strong(i32* %sc, i32 %old_val, i32 %new_val) {
+define zeroext i1 @icmp_cmpxchg_strong(ptr %sc, i32 %old_val, i32 %new_val) {
; CHECK-LABEL: @icmp_cmpxchg_strong(
-; CHECK-NEXT: [[XCHG:%.*]] = cmpxchg i32* [[SC:%.*]], i32 [[OLD_VAL:%.*]], i32 [[NEW_VAL:%.*]] seq_cst seq_cst, align 4
+; CHECK-NEXT: [[XCHG:%.*]] = cmpxchg ptr [[SC:%.*]], i32 [[OLD_VAL:%.*]], i32 [[NEW_VAL:%.*]] seq_cst seq_cst, align 4
; CHECK-NEXT: [[ICMP:%.*]] = extractvalue { i32, i1 } [[XCHG]], 1
; CHECK-NEXT: ret i1 [[ICMP]]
;
- %xchg = cmpxchg i32* %sc, i32 %old_val, i32 %new_val seq_cst seq_cst
+ %xchg = cmpxchg ptr %sc, i32 %old_val, i32 %new_val seq_cst seq_cst
%xtrc = extractvalue { i32, i1 } %xchg, 0
%icmp = icmp eq i32 %xtrc, %old_val
ret i1 %icmp
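; For a strong cmpxchg the success flag (field 1 of the result) is by
; definition "loaded value == comparand", so re-comparing field 0 against
; %old_val is redundant and InstCombine reuses the flag directly.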
define i1 @f10(i16 %p) {
; CHECK-LABEL: @f10(
-; CHECK-NEXT: [[CMP580:%.*]] = icmp uge i16 [[P:%.*]], mul (i16 zext (i8 ptrtoint (i1 (i16)* @f10 to i8) to i16), i16 zext (i8 ptrtoint (i1 (i16)* @f10 to i8) to i16))
+; CHECK-NEXT: [[CMP580:%.*]] = icmp uge i16 [[P:%.*]], mul (i16 zext (i8 ptrtoint (ptr @f10 to i8) to i16), i16 zext (i8 ptrtoint (ptr @f10 to i8) to i16))
; CHECK-NEXT: ret i1 [[CMP580]]
;
- %cmp580 = icmp ule i16 mul (i16 zext (i8 ptrtoint (i1 (i16)* @f10 to i8) to i16), i16 zext (i8 ptrtoint (i1 (i16)* @f10 to i8) to i16)), %p
+ %cmp580 = icmp ule i16 mul (i16 zext (i8 ptrtoint (ptr @f10 to i8) to i16), i16 zext (i8 ptrtoint (ptr @f10 to i8) to i16)), %p
ret i1 %cmp580
}
ret <2 x i1> %cmp
}
-define i1 @or_ptrtoint_mismatch(i8* %p, i32* %q) {
+define i1 @or_ptrtoint_mismatch(ptr %p, ptr %q) {
; CHECK-LABEL: @or_ptrtoint_mismatch(
-; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8* [[P:%.*]], null
-; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32* [[Q:%.*]], null
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq ptr [[P:%.*]], null
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq ptr [[Q:%.*]], null
; CHECK-NEXT: [[B:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret i1 [[B]]
;
- %pp = ptrtoint i8* %p to i64
- %qq = ptrtoint i32* %q to i64
+ %pp = ptrtoint ptr %p to i64
+ %qq = ptrtoint ptr %q to i64
%o = or i64 %pp, %qq
%b = icmp eq i64 %o, 0
ret i1 %b
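; Worked form: "or i64 %pp, %qq == 0" holds iff both integers are zero, and in
; address space 0 "ptrtoint %p == 0" is equivalent to "%p == null", so the
; single compare splits into the two null checks joined by "and".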
; Don't crash by assuming the compared values are integers.
declare void @llvm.assume(i1)
-define i1 @PR35794(i32* %a) {
+define i1 @PR35794(ptr %a) {
; CHECK-LABEL: @PR35794(
-; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i32* [[A:%.*]], null
+; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq ptr [[A:%.*]], null
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: ret i1 true
;
- %cmp = icmp sgt i32* %a, inttoptr (i64 -1 to i32*)
- %maskcond = icmp eq i32* %a, null
+ %cmp = icmp sgt ptr %a, inttoptr (i64 -1 to ptr)
+ %maskcond = icmp eq ptr %a, null
tail call void @llvm.assume(i1 %maskcond)
ret i1 %cmp
}
; Don't crash by assuming the compared values are integers.
-define <2 x i1> @PR36583(<2 x i8*>) {
+define <2 x i1> @PR36583(<2 x ptr>) {
; CHECK-LABEL: @PR36583(
-; CHECK-NEXT: [[RES:%.*]] = icmp eq <2 x i8*> [[TMP0:%.*]], zeroinitializer
+; CHECK-NEXT: [[RES:%.*]] = icmp eq <2 x ptr> [[TMP0:%.*]], zeroinitializer
; CHECK-NEXT: ret <2 x i1> [[RES]]
;
- %cast = ptrtoint <2 x i8*> %0 to <2 x i64>
+ %cast = ptrtoint <2 x ptr> %0 to <2 x i64>
%res = icmp eq <2 x i64> %cast, zeroinitializer
ret <2 x i1> %res
}
declare void @use_i1(i1)
declare void @use_i64(i64)
-define i1 @signbit_bitcast_fpext_extra_use(float %x, i64* %p) {
+define i1 @signbit_bitcast_fpext_extra_use(float %x, ptr %p) {
; CHECK-LABEL: @signbit_bitcast_fpext_extra_use(
; CHECK-NEXT: [[F:%.*]] = fpext float [[X:%.*]] to double
; CHECK-NEXT: [[B:%.*]] = bitcast double [[F]] to i64
; CHECK-LABEL: @pr47997(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I:%.*]] = add nsw i32 [[ARG:%.*]], -1
-; CHECK-NEXT: store i32 [[I]], i32* @x, align 4
+; CHECK-NEXT: store i32 [[I]], ptr @x, align 4
; CHECK-NEXT: [[I1:%.*]] = sub nsw i32 1, [[ARG]]
-; CHECK-NEXT: store i32 [[I1]], i32* @y, align 4
+; CHECK-NEXT: store i32 [[I1]], ptr @y, align 4
; CHECK-NEXT: ret i1 true
;
bb:
%i = add nsw i32 %arg, -1
- store i32 %i, i32* @x
+ store i32 %i, ptr @x
%i1 = sub nsw i32 1, %arg
- store i32 %i1, i32* @y
+ store i32 %i1, ptr @y
%i2 = sub nsw i32 0, %i1
%i3 = icmp eq i32 %i, %i2
ret i1 %i3
; Test that the presence of a range does not cause unprofitable transforms
; with bit arithmetic.
-define i1 @without_range(i32* %A) {
+define i1 @without_range(ptr %A) {
; CHECK-LABEL: @without_range(
-; CHECK-NEXT: [[A_VAL:%.*]] = load i32, i32* [[A:%.*]], align 8
+; CHECK-NEXT: [[A_VAL:%.*]] = load i32, ptr [[A:%.*]], align 8
; CHECK-NEXT: [[C:%.*]] = icmp slt i32 [[A_VAL]], 2
; CHECK-NEXT: ret i1 [[C]]
;
- %A.val = load i32, i32* %A, align 8
+ %A.val = load i32, ptr %A, align 8
%B = sdiv i32 %A.val, 2
%C = icmp sge i32 0, %B
ret i1 %C
}
-define i1 @with_range(i32* %A) {
+define i1 @with_range(ptr %A) {
; CHECK-LABEL: @with_range(
-; CHECK-NEXT: [[A_VAL:%.*]] = load i32, i32* [[A:%.*]], align 8, !range [[RNG0:![0-9]+]]
+; CHECK-NEXT: [[A_VAL:%.*]] = load i32, ptr [[A:%.*]], align 8, !range [[RNG0:![0-9]+]]
; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[A_VAL]], 2
; CHECK-NEXT: ret i1 [[C]]
;
- %A.val = load i32, i32* %A, align 8, !range !0
+ %A.val = load i32, ptr %A, align 8, !range !0
%B = sdiv i32 %A.val, 2
%C = icmp sge i32 0, %B
ret i1 %C
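; Worked form: %B = %A.val sdiv 2, and "icmp sge 0, %B" asks whether %B <= 0.
; Without the range that is %A.val <= 1, i.e. the "slt 2" above; with a known
; non-negative range the same bound becomes the unsigned "ult 2" in the CHECK
; line.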
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64"
-define i32 *@test1(i32* %A, i32 %Offset) {
+define ptr @test1(ptr %A, i32 %Offset) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[BB:%.*]]
; CHECK-NEXT: [[COND:%.*]] = icmp sgt i32 [[RHS_IDX]], 100
; CHECK-NEXT: br i1 [[COND]], label [[BB2:%.*]], label [[BB]]
; CHECK: bb2:
-; CHECK-NEXT: [[RHS_PTR:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[RHS_IDX]]
-; CHECK-NEXT: ret i32* [[RHS_PTR]]
+; CHECK-NEXT: [[RHS_PTR:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[RHS_IDX]]
+; CHECK-NEXT: ret ptr [[RHS_PTR]]
;
entry:
- %tmp = getelementptr inbounds i32, i32* %A, i32 %Offset
+ %tmp = getelementptr inbounds i32, ptr %A, i32 %Offset
br label %bb
bb:
- %RHS = phi i32* [ %RHS.next, %bb ], [ %tmp, %entry ]
- %LHS = getelementptr inbounds i32, i32* %A, i32 100
- %RHS.next = getelementptr inbounds i32, i32* %RHS, i64 1
- %cond = icmp ult i32 * %LHS, %RHS
+ %RHS = phi ptr [ %RHS.next, %bb ], [ %tmp, %entry ]
+ %LHS = getelementptr inbounds i32, ptr %A, i32 100
+ %RHS.next = getelementptr inbounds i32, ptr %RHS, i64 1
+ %cond = icmp ult ptr %LHS, %RHS
br i1 %cond, label %bb2, label %bb
bb2:
- ret i32* %RHS
+ ret ptr %RHS
}
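; The transform in test1..test6: when two inbounds GEPs share a base, the
; pointer compare is rewritten as a compare of their indices, so the loop
; above carries the integer [[RHS_IDX]] instead of a pointer and the GEP is
; sunk to its single use in bb2.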
-define i32 *@test2(i32 %A, i32 %Offset) {
+define ptr @test2(i32 %A, i32 %Offset) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[BB:%.*]]
; CHECK-NEXT: [[COND:%.*]] = icmp sgt i32 [[RHS_IDX]], 100
; CHECK-NEXT: br i1 [[COND]], label [[BB2:%.*]], label [[BB]]
; CHECK: bb2:
-; CHECK-NEXT: [[RHSTO_PTR:%.*]] = inttoptr i32 [[A:%.*]] to i32*
-; CHECK-NEXT: [[RHS_PTR:%.*]] = getelementptr inbounds i32, i32* [[RHSTO_PTR]], i32 [[RHS_IDX]]
-; CHECK-NEXT: ret i32* [[RHS_PTR]]
+; CHECK-NEXT: [[RHSTO_PTR:%.*]] = inttoptr i32 [[A:%.*]] to ptr
+; CHECK-NEXT: [[RHS_PTR:%.*]] = getelementptr inbounds i32, ptr [[RHSTO_PTR]], i32 [[RHS_IDX]]
+; CHECK-NEXT: ret ptr [[RHS_PTR]]
;
entry:
- %A.ptr = inttoptr i32 %A to i32*
- %tmp = getelementptr inbounds i32, i32* %A.ptr, i32 %Offset
+ %A.ptr = inttoptr i32 %A to ptr
+ %tmp = getelementptr inbounds i32, ptr %A.ptr, i32 %Offset
br label %bb
bb:
- %RHS = phi i32* [ %RHS.next, %bb ], [ %tmp, %entry ]
- %LHS = getelementptr inbounds i32, i32* %A.ptr, i32 100
- %RHS.next = getelementptr inbounds i32, i32* %RHS, i64 1
- %cmp0 = ptrtoint i32 *%LHS to i32
- %cmp1 = ptrtoint i32 *%RHS to i32
+ %RHS = phi ptr [ %RHS.next, %bb ], [ %tmp, %entry ]
+ %LHS = getelementptr inbounds i32, ptr %A.ptr, i32 100
+ %RHS.next = getelementptr inbounds i32, ptr %RHS, i64 1
+ %cmp0 = ptrtoint ptr %LHS to i32
+ %cmp1 = ptrtoint ptr %RHS to i32
%cond = icmp ult i32 %cmp0, %cmp1
br i1 %cond, label %bb2, label %bb
bb2:
- ret i32* %RHS
+ ret ptr %RHS
}
; Perform the transformation only if we know that the GEPs used are inbounds.
-define i32 *@test3(i32* %A, i32 %Offset) {
+define ptr @test3(ptr %A, i32 %Offset) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP:%.*]] = getelementptr i32, i32* [[A:%.*]], i32 [[OFFSET:%.*]]
+; CHECK-NEXT: [[TMP:%.*]] = getelementptr i32, ptr [[A:%.*]], i32 [[OFFSET:%.*]]
; CHECK-NEXT: br label [[BB:%.*]]
; CHECK: bb:
-; CHECK-NEXT: [[RHS:%.*]] = phi i32* [ [[RHS_NEXT:%.*]], [[BB]] ], [ [[TMP]], [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[LHS:%.*]] = getelementptr i32, i32* [[A]], i32 100
-; CHECK-NEXT: [[RHS_NEXT]] = getelementptr i32, i32* [[RHS]], i32 1
-; CHECK-NEXT: [[COND:%.*]] = icmp ult i32* [[LHS]], [[RHS]]
+; CHECK-NEXT: [[RHS:%.*]] = phi ptr [ [[RHS_NEXT:%.*]], [[BB]] ], [ [[TMP]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[LHS:%.*]] = getelementptr i32, ptr [[A]], i32 100
+; CHECK-NEXT: [[RHS_NEXT]] = getelementptr i32, ptr [[RHS]], i32 1
+; CHECK-NEXT: [[COND:%.*]] = icmp ult ptr [[LHS]], [[RHS]]
; CHECK-NEXT: br i1 [[COND]], label [[BB2:%.*]], label [[BB]]
; CHECK: bb2:
-; CHECK-NEXT: ret i32* [[RHS]]
+; CHECK-NEXT: ret ptr [[RHS]]
;
entry:
- %tmp = getelementptr i32, i32* %A, i32 %Offset
+ %tmp = getelementptr i32, ptr %A, i32 %Offset
br label %bb
bb:
- %RHS = phi i32* [ %RHS.next, %bb ], [ %tmp, %entry ]
- %LHS = getelementptr i32, i32* %A, i32 100
- %RHS.next = getelementptr i32, i32* %RHS, i64 1
- %cond = icmp ult i32 * %LHS, %RHS
+ %RHS = phi ptr [ %RHS.next, %bb ], [ %tmp, %entry ]
+ %LHS = getelementptr i32, ptr %A, i32 100
+ %RHS.next = getelementptr i32, ptr %RHS, i64 1
+ %cond = icmp ult ptr %LHS, %RHS
br i1 %cond, label %bb2, label %bb
bb2:
- ret i32* %RHS
+ ret ptr %RHS
}
; An inttoptr that requires an extension or truncation will be opaque when determining
; the base pointer. In this case we can still perform the transformation by considering
; A.ptr as being the base pointer.
-define i32 *@test4(i16 %A, i32 %Offset) {
+define ptr @test4(i16 %A, i32 %Offset) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[BB:%.*]]
; CHECK-NEXT: br i1 [[COND]], label [[BB2:%.*]], label [[BB]]
; CHECK: bb2:
; CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[A:%.*]] to i32
-; CHECK-NEXT: [[RHSTO_PTR:%.*]] = inttoptr i32 [[TMP0]] to i32*
-; CHECK-NEXT: [[RHS_PTR:%.*]] = getelementptr inbounds i32, i32* [[RHSTO_PTR]], i32 [[RHS_IDX]]
-; CHECK-NEXT: ret i32* [[RHS_PTR]]
+; CHECK-NEXT: [[RHSTO_PTR:%.*]] = inttoptr i32 [[TMP0]] to ptr
+; CHECK-NEXT: [[RHS_PTR:%.*]] = getelementptr inbounds i32, ptr [[RHSTO_PTR]], i32 [[RHS_IDX]]
+; CHECK-NEXT: ret ptr [[RHS_PTR]]
;
entry:
- %A.ptr = inttoptr i16 %A to i32*
- %tmp = getelementptr inbounds i32, i32* %A.ptr, i32 %Offset
+ %A.ptr = inttoptr i16 %A to ptr
+ %tmp = getelementptr inbounds i32, ptr %A.ptr, i32 %Offset
br label %bb
bb:
- %RHS = phi i32* [ %RHS.next, %bb ], [ %tmp, %entry ]
- %LHS = getelementptr inbounds i32, i32* %A.ptr, i32 100
- %RHS.next = getelementptr inbounds i32, i32* %RHS, i64 1
- %cmp0 = ptrtoint i32 *%LHS to i32
- %cmp1 = ptrtoint i32 *%RHS to i32
+ %RHS = phi ptr [ %RHS.next, %bb ], [ %tmp, %entry ]
+ %LHS = getelementptr inbounds i32, ptr %A.ptr, i32 100
+ %RHS.next = getelementptr inbounds i32, ptr %RHS, i64 1
+ %cmp0 = ptrtoint ptr %LHS to i32
+ %cmp1 = ptrtoint ptr %RHS to i32
%cond = icmp ult i32 %cmp0, %cmp1
br i1 %cond, label %bb2, label %bb
bb2:
- ret i32* %RHS
+ ret ptr %RHS
}
-declare i32* @fun_ptr()
+declare ptr @fun_ptr()
-define i32 *@test5(i32 %Offset) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define ptr @test5(i32 %Offset) personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: @test5(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A:%.*]] = invoke i32* @fun_ptr()
+; CHECK-NEXT: [[A:%.*]] = invoke ptr @fun_ptr()
; CHECK-NEXT: to label [[CONT:%.*]] unwind label [[LPAD:%.*]]
; CHECK: cont:
; CHECK-NEXT: br label [[BB:%.*]]
; CHECK-NEXT: [[COND:%.*]] = icmp sgt i32 [[RHS_IDX]], 100
; CHECK-NEXT: br i1 [[COND]], label [[BB2:%.*]], label [[BB]]
; CHECK: bb2:
-; CHECK-NEXT: [[RHS_PTR:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[RHS_IDX]]
-; CHECK-NEXT: ret i32* [[RHS_PTR]]
+; CHECK-NEXT: [[RHS_PTR:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RHS_IDX]]
+; CHECK-NEXT: ret ptr [[RHS_PTR]]
; CHECK: lpad:
-; CHECK-NEXT: [[L:%.*]] = landingpad { i8*, i32 }
+; CHECK-NEXT: [[L:%.*]] = landingpad { ptr, i32 }
; CHECK-NEXT: cleanup
-; CHECK-NEXT: ret i32* null
+; CHECK-NEXT: ret ptr null
;
entry:
- %A = invoke i32 *@fun_ptr() to label %cont unwind label %lpad
+ %A = invoke ptr @fun_ptr() to label %cont unwind label %lpad
cont:
- %tmp = getelementptr inbounds i32, i32* %A, i32 %Offset
+ %tmp = getelementptr inbounds i32, ptr %A, i32 %Offset
br label %bb
bb:
- %RHS = phi i32* [ %RHS.next, %bb ], [ %tmp, %cont ]
- %LHS = getelementptr inbounds i32, i32* %A, i32 100
- %RHS.next = getelementptr inbounds i32, i32* %RHS, i64 1
- %cond = icmp ult i32 * %LHS, %RHS
+ %RHS = phi ptr [ %RHS.next, %bb ], [ %tmp, %cont ]
+ %LHS = getelementptr inbounds i32, ptr %A, i32 100
+ %RHS.next = getelementptr inbounds i32, ptr %RHS, i64 1
+ %cond = icmp ult ptr %LHS, %RHS
br i1 %cond, label %bb2, label %bb
bb2:
- ret i32* %RHS
+ ret ptr %RHS
lpad:
- %l = landingpad { i8*, i32 } cleanup
- ret i32* null
+ %l = landingpad { ptr, i32 } cleanup
+ ret ptr null
}
declare i32 @fun_i32()
-define i32 *@test6(i32 %Offset) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define ptr @test6(i32 %Offset) personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: @test6(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = invoke i32 @fun_i32()
; CHECK-NEXT: [[COND:%.*]] = icmp sgt i32 [[RHS_IDX]], 100
; CHECK-NEXT: br i1 [[COND]], label [[BB2:%.*]], label [[BB]]
; CHECK: bb2:
-; CHECK-NEXT: [[RHSTO_PTR:%.*]] = inttoptr i32 [[A]] to i32*
-; CHECK-NEXT: [[RHS_PTR:%.*]] = getelementptr inbounds i32, i32* [[RHSTO_PTR]], i32 [[RHS_IDX]]
-; CHECK-NEXT: ret i32* [[RHS_PTR]]
+; CHECK-NEXT: [[RHSTO_PTR:%.*]] = inttoptr i32 [[A]] to ptr
+; CHECK-NEXT: [[RHS_PTR:%.*]] = getelementptr inbounds i32, ptr [[RHSTO_PTR]], i32 [[RHS_IDX]]
+; CHECK-NEXT: ret ptr [[RHS_PTR]]
; CHECK: lpad:
-; CHECK-NEXT: [[L:%.*]] = landingpad { i8*, i32 }
+; CHECK-NEXT: [[L:%.*]] = landingpad { ptr, i32 }
; CHECK-NEXT: cleanup
-; CHECK-NEXT: ret i32* null
+; CHECK-NEXT: ret ptr null
;
entry:
%A = invoke i32 @fun_i32() to label %cont unwind label %lpad
cont:
- %A.ptr = inttoptr i32 %A to i32*
- %tmp = getelementptr inbounds i32, i32* %A.ptr, i32 %Offset
+ %A.ptr = inttoptr i32 %A to ptr
+ %tmp = getelementptr inbounds i32, ptr %A.ptr, i32 %Offset
br label %bb
bb:
- %RHS = phi i32* [ %RHS.next, %bb ], [ %tmp, %cont ]
- %LHS = getelementptr inbounds i32, i32* %A.ptr, i32 100
- %RHS.next = getelementptr inbounds i32, i32* %RHS, i64 1
- %cond = icmp ult i32 * %LHS, %RHS
+ %RHS = phi ptr [ %RHS.next, %bb ], [ %tmp, %cont ]
+ %LHS = getelementptr inbounds i32, ptr %A.ptr, i32 100
+ %RHS.next = getelementptr inbounds i32, ptr %RHS, i64 1
+ %cond = icmp ult ptr %LHS, %RHS
br i1 %cond, label %bb2, label %bb
bb2:
- ret i32* %RHS
+ ret ptr %RHS
lpad:
- %l = landingpad { i8*, i32 } cleanup
- ret i32* null
+ %l = landingpad { ptr, i32 } cleanup
+ ret ptr null
}
br label %bb7
bb7: ; preds = %bb7, %entry
- %phi = phi i64* [ @pr30402, %entry ], [ getelementptr inbounds (i64, i64* @pr30402, i32 1), %bb7 ]
- %cmp = icmp eq i64* %phi, getelementptr inbounds (i64, i64* @pr30402, i32 1)
+ %phi = phi ptr [ @pr30402, %entry ], [ getelementptr inbounds (i64, ptr @pr30402, i32 1), %bb7 ]
+ %cmp = icmp eq ptr %phi, getelementptr inbounds (i64, ptr @pr30402, i32 1)
br i1 %cmp, label %bb10, label %bb7
bb10:
ret i1 %cmp
}
declare i32 @__gxx_personality_v0(...)
-define i1 @test8(i64* %in, i64 %offset) {
+define i1 @test8(ptr %in, i64 %offset) {
; CHECK-LABEL: @test8(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[LD:%.*]] = load i64, i64* [[IN:%.*]], align 8
+; CHECK-NEXT: [[LD:%.*]] = load i64, ptr [[IN:%.*]], align 8
; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[LD]] to i32
-; CHECK-NEXT: [[CASTI8:%.*]] = inttoptr i32 [[TMP0]] to i8*
+; CHECK-NEXT: [[CASTI8:%.*]] = inttoptr i32 [[TMP0]] to ptr
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[OFFSET:%.*]] to i32
-; CHECK-NEXT: [[GEPI8:%.*]] = getelementptr inbounds i8, i8* [[CASTI8]], i32 [[TMP1]]
-; CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[GEPI8]] to i32**
+; CHECK-NEXT: [[GEPI8:%.*]] = getelementptr inbounds i8, ptr [[CASTI8]], i32 [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[LD]] to i32
-; CHECK-NEXT: [[PTRCAST:%.*]] = inttoptr i32 [[TMP2]] to i32**
-; CHECK-NEXT: [[GEPI32:%.*]] = getelementptr inbounds i32*, i32** [[PTRCAST]], i32 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32** [[GEPI32]], [[CAST]]
+; CHECK-NEXT: [[PTRCAST:%.*]] = inttoptr i32 [[TMP2]] to ptr
+; CHECK-NEXT: [[GEPI32:%.*]] = getelementptr inbounds ptr, ptr [[PTRCAST]], i32 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[GEPI32]], [[GEPI8]]
; CHECK-NEXT: ret i1 [[CMP]]
;
entry:
- %ld = load i64, i64* %in, align 8
- %casti8 = inttoptr i64 %ld to i8*
- %gepi8 = getelementptr inbounds i8, i8* %casti8, i64 %offset
- %cast = bitcast i8* %gepi8 to i32**
- %ptrcast = inttoptr i64 %ld to i32**
- %gepi32 = getelementptr inbounds i32*, i32** %ptrcast, i64 1
- %cmp = icmp eq i32** %gepi32, %cast
+ %ld = load i64, ptr %in, align 8
+ %casti8 = inttoptr i64 %ld to ptr
+ %gepi8 = getelementptr inbounds i8, ptr %casti8, i64 %offset
+ %ptrcast = inttoptr i64 %ld to ptr
+ %gepi32 = getelementptr inbounds ptr, ptr %ptrcast, i64 1
+ %cmp = icmp eq ptr %gepi32, %gepi8
ret i1 %cmp
}
-define void @test_zero_offset_cycle({ i64, i64 }* %arg) {
+define void @test_zero_offset_cycle(ptr %arg) {
; CHECK-LABEL: @test_zero_offset_cycle(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK-NEXT: br label [[LOOP]]
;
entry:
- %gep = getelementptr inbounds { i64, i64 }, { i64, i64 }* %arg, i32 0, i32 1
- %gep.int = ptrtoint i64* %gep to i32
+ %gep = getelementptr inbounds { i64, i64 }, ptr %arg, i32 0, i32 1
+ %gep.int = ptrtoint ptr %gep to i32
br label %loop
loop:
%phi = phi i32 [ %gep.int, %entry ], [ %gep.int2, %loop.cont ], [ %phi, %loop ]
- %phi.ptr = inttoptr i32 %phi to i64*
- %cmp = icmp eq i64* %gep, %phi.ptr
+ %phi.ptr = inttoptr i32 %phi to ptr
+ %cmp = icmp eq ptr %gep, %phi.ptr
br i1 %cmp, label %loop, label %loop.cont
loop.cont:
- %gep.int2 = ptrtoint i64* %gep to i32
+ %gep.int2 = ptrtoint ptr %gep to i32
br label %loop
}
; PR26354: https://llvm.org/bugs/show_bug.cgi?id=26354
; Don't create a shufflevector if we know that we're not going to replace the insertelement.
-define double @pr26354(<2 x double>* %tmp, i1 %B) {
+define double @pr26354(ptr %tmp, i1 %B) {
; CHECK-LABEL: @pr26354(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[LD:%.*]] = load <2 x double>, <2 x double>* [[TMP:%.*]], align 16
+; CHECK-NEXT: [[LD:%.*]] = load <2 x double>, ptr [[TMP:%.*]], align 16
; CHECK-NEXT: br i1 [[B:%.*]], label [[IF:%.*]], label [[END:%.*]]
; CHECK: if:
; CHECK-NEXT: [[E2:%.*]] = extractelement <2 x double> [[LD]], i64 1
;
entry:
- %ld = load <2 x double>, <2 x double>* %tmp
+ %ld = load <2 x double>, ptr %tmp
%e1 = extractelement <2 x double> %ld, i32 0
%e2 = extractelement <2 x double> %ld, i32 1
br i1 %B, label %if, label %end
; CHECK-LABEL: @PR30923(
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[EXT1:%.*]] = extractelement <2 x float> [[X:%.*]], i64 1
-; CHECK-NEXT: store float [[EXT1]], float* undef, align 4
+; CHECK-NEXT: store float [[EXT1]], ptr undef, align 4
; CHECK-NEXT: br label [[BB2:%.*]]
; CHECK: bb2:
; CHECK-NEXT: [[EXT2:%.*]] = extractelement <2 x float> [[X]], i64 0
;
bb1:
%ext1 = extractelement <2 x float> %x, i32 1
- store float %ext1, float* undef, align 4
+ store float %ext1, ptr undef, align 4
br label %bb2
bb2:
; CHECK-NOT: insertvalue
; CHECK-NOT: extractelement
; CHECK: store <2 x double>
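; The julia_* functions below rebuild an aggregate lane-by-lane from a loaded
; vector; instcombine is expected to see through the extractelement/insertvalue
; chain and emit a single vector store, as the CHECK lines above require.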
-define void @julia_2xdouble([2 x double]* sret([2 x double]), <2 x double>*) {
+define void @julia_2xdouble(ptr sret([2 x double]), ptr) {
top:
- %x = load <2 x double>, <2 x double>* %1
+ %x = load <2 x double>, ptr %1
%x0 = extractelement <2 x double> %x, i32 0
%i0 = insertvalue [2 x double] undef, double %x0, 0
%x1 = extractelement <2 x double> %x, i32 1
%i1 = insertvalue [2 x double] %i0, double %x1, 1
- store [2 x double] %i1, [2 x double]* %0, align 4
+ store [2 x double] %i1, ptr %0, align 4
ret void
}
; CHECK-NOT: insertvalue
; CHECK-NOT: extractelement
; CHECK: store <2 x i64>
-define void @julia_2xi64([2 x i64]* sret([2 x i64]), <2 x i64>*) {
+define void @julia_2xi64(ptr sret([2 x i64]), ptr) {
top:
- %x = load <2 x i64>, <2 x i64>* %1
+ %x = load <2 x i64>, ptr %1
%x0 = extractelement <2 x i64> %x, i32 1
%i0 = insertvalue [2 x i64] undef, i64 %x0, 0
%x1 = extractelement <2 x i64> %x, i32 1
%i1 = insertvalue [2 x i64] %i0, i64 %x1, 1
%x2 = extractelement <2 x i64> %x, i32 0
%i2 = insertvalue [2 x i64] %i1, i64 %x2, 0
- store [2 x i64] %i2, [2 x i64]* %0, align 4
+ store [2 x i64] %i2, ptr %0, align 4
ret void
}
; CHECK-NOT: insertvalue
; CHECK-NOT: extractelement
; CHECK: store <4 x float>
-define void @julia_4xfloat([4 x float]* sret([4 x float]), <4 x float>*) {
+define void @julia_4xfloat(ptr sret([4 x float]), ptr) {
top:
- %x = load <4 x float>, <4 x float>* %1
+ %x = load <4 x float>, ptr %1
%x0 = extractelement <4 x float> %x, i32 0
%i0 = insertvalue [4 x float] undef, float %x0, 0
%x1 = extractelement <4 x float> %x, i32 1
%i1 = insertvalue [4 x float] %i0, float %x1, 1
%x2 = extractelement <4 x float> %x, i32 2
%i2 = insertvalue [4 x float] %i1, float %x2, 2
%x3 = extractelement <4 x float> %x, i32 3
%i3 = insertvalue [4 x float] %i2, float %x3, 3
- store [4 x float] %i3, [4 x float]* %0, align 4
+ store [4 x float] %i3, ptr %0, align 4
ret void
}
; CHECK-NOT: insertvalue
; CHECK-NOT: extractelement
; CHECK: store <4 x float>
%pseudovec = type { float, float, float, float }
-define void @julia_pseudovec(%pseudovec* sret(%pseudovec), <4 x float>*) {
+define void @julia_pseudovec(ptr sret(%pseudovec), ptr) {
top:
- %x = load <4 x float>, <4 x float>* %1
+ %x = load <4 x float>, ptr %1
%x0 = extractelement <4 x float> %x, i32 0
%i0 = insertvalue %pseudovec undef, float %x0, 0
%x1 = extractelement <4 x float> %x, i32 1
%i1 = insertvalue %pseudovec %i0, float %x1, 1
%x2 = extractelement <4 x float> %x, i32 2
%i2 = insertvalue %pseudovec %i1, float %x2, 2
%x3 = extractelement <4 x float> %x, i32 3
%i3 = insertvalue %pseudovec %i2, float %x3, 3
- store %pseudovec %i3, %pseudovec* %0, align 4
+ store %pseudovec %i3, ptr %0, align 4
ret void
}
; CHECK-LABEL: s2l
; CHECK-NOT: load
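; @llvm.sideeffect must not act as a memory barrier here: the load of %p is
; expected to be forwarded from the preceding store and fold to 0.0, so no
; load survives (CHECK-NOT above).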
-define float @s2l(float* %p) {
- store float 0.0, float* %p
+define float @s2l(ptr %p) {
+ store float 0.0, ptr %p
call void @llvm.sideeffect()
- %t = load float, float* %p
+ %t = load float, ptr %p
ret float %t
}
declare void @llvm.sideeffect()
; for (int j = 0; j < n; j += 1)
; for (int k = 0; k < n; k += 1)
; for (int l = 0; l < n; l += 1) {
; double *p = &A[i + j + k + l];
; double x = *p;
; double y = *p;
; arg(x + y);
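; The two loads of *p below differ only in their llvm.access.group metadata;
; the point of the test is that combining them has to merge that metadata
; sensibly rather than drop or miscombine it.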
declare void @arg(double)
-define void @func(i64 %n, double* noalias nonnull %A) {
+define void @func(i64 %n, ptr noalias nonnull %A) {
entry:
br label %for.cond
%add20 = add nsw i32 %add, %k.0
%add21 = add nsw i32 %add20, %l.0
%idxprom = sext i32 %add21 to i64
- %arrayidx = getelementptr inbounds double, double* %A, i64 %idxprom
- %0 = load double, double* %arrayidx, align 8, !llvm.access.group !1
- %1 = load double, double* %arrayidx, align 8, !llvm.access.group !2
+ %arrayidx = getelementptr inbounds double, ptr %A, i64 %idxprom
+ %0 = load double, ptr %arrayidx, align 8, !llvm.access.group !1
+ %1 = load double, ptr %arrayidx, align 8, !llvm.access.group !2
%add22 = fadd double %0, %1
call void @arg(double %add22), !llvm.access.group !3
%add23 = add nsw i32 %l.0, 1
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
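; The tests below rewrite an integer loop induction value as a pointer phi
; when its only uses are inttoptr/ptrtoint round-trips, e.g. schematically:
;   %i = phi i64 [ %i.next, %loop ], [ %start, %ph ]
;   %p = inttoptr i64 %i to ptr   ; -> becomes a phi of ptr directly
; @test1_neg checks a case where the transformation is expected not to fire.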
-define void @test1(float* %a, float* readnone %a_end, i64* %b.i64) {
+define void @test1(ptr %a, ptr readnone %a_end, ptr %b.i64) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CMP1:%.*]] = icmp ult float* [[A:%.*]], [[A_END:%.*]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ult ptr [[A:%.*]], [[A_END:%.*]]
; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_END:%.*]]
; CHECK: for.body.preheader:
-; CHECK-NEXT: [[B:%.*]] = load i64, i64* [[B_I64:%.*]], align 8
-; CHECK-NEXT: [[B_PTR:%.*]] = inttoptr i64 [[B]] to float*
+; CHECK-NEXT: [[B:%.*]] = load i64, ptr [[B_I64:%.*]], align 8
+; CHECK-NEXT: [[B_PTR:%.*]] = inttoptr i64 [[B]] to ptr
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[A_ADDR_03:%.*]] = phi float* [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[A]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[B_ADDR_02_PTR:%.*]] = phi float* [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[B_PTR]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[I1:%.*]] = load float, float* [[B_ADDR_02_PTR]], align 4
+; CHECK-NEXT: [[A_ADDR_03:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[A]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[B_ADDR_02_PTR:%.*]] = phi ptr [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[B_PTR]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[I1:%.*]] = load float, ptr [[B_ADDR_02_PTR]], align 4
; CHECK-NEXT: [[MUL_I:%.*]] = fmul float [[I1]], 4.200000e+01
-; CHECK-NEXT: store float [[MUL_I]], float* [[A_ADDR_03]], align 4
-; CHECK-NEXT: [[ADD]] = getelementptr inbounds float, float* [[B_ADDR_02_PTR]], i64 1
-; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, float* [[A_ADDR_03]], i64 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult float* [[INCDEC_PTR]], [[A_END]]
+; CHECK-NEXT: store float [[MUL_I]], ptr [[A_ADDR_03]], align 4
+; CHECK-NEXT: [[ADD]] = getelementptr inbounds float, ptr [[B_ADDR_02_PTR]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, ptr [[A_ADDR_03]], i64 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult ptr [[INCDEC_PTR]], [[A_END]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
- %cmp1 = icmp ult float* %a, %a_end
+ %cmp1 = icmp ult ptr %a, %a_end
br i1 %cmp1, label %for.body.preheader, label %for.end
for.body.preheader: ; preds = %entry
- %b = load i64, i64* %b.i64, align 8
+ %b = load i64, ptr %b.i64, align 8
br label %for.body
for.body: ; preds = %for.body, %for.body.preheader
- %a.addr.03 = phi float* [ %incdec.ptr, %for.body ], [ %a, %for.body.preheader ]
+ %a.addr.03 = phi ptr [ %incdec.ptr, %for.body ], [ %a, %for.body.preheader ]
%b.addr.02 = phi i64 [ %add.int, %for.body ], [ %b, %for.body.preheader ]
- %tmp = inttoptr i64 %b.addr.02 to float*
- %i1 = load float, float* %tmp, align 4
+ %tmp = inttoptr i64 %b.addr.02 to ptr
+ %i1 = load float, ptr %tmp, align 4
%mul.i = fmul float %i1, 4.200000e+01
- store float %mul.i, float* %a.addr.03, align 4
- %add = getelementptr inbounds float, float* %tmp, i64 1
- %add.int = ptrtoint float* %add to i64
- %incdec.ptr = getelementptr inbounds float, float* %a.addr.03, i64 1
- %cmp = icmp ult float* %incdec.ptr, %a_end
+ store float %mul.i, ptr %a.addr.03, align 4
+ %add = getelementptr inbounds float, ptr %tmp, i64 1
+ %add.int = ptrtoint ptr %add to i64
+ %incdec.ptr = getelementptr inbounds float, ptr %a.addr.03, i64 1
+ %cmp = icmp ult ptr %incdec.ptr, %a_end
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body, %entry
ret void
}
-define void @test1_neg(float* %a, float* readnone %a_end, i64* %b.i64) {
+define void @test1_neg(ptr %a, ptr readnone %a_end, ptr %b.i64) {
; CHECK-LABEL: @test1_neg(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CMP1:%.*]] = icmp ult float* [[A:%.*]], [[A_END:%.*]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ult ptr [[A:%.*]], [[A_END:%.*]]
; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_END:%.*]]
; CHECK: for.body.preheader:
-; CHECK-NEXT: [[B:%.*]] = load i64, i64* [[B_I64:%.*]], align 8
+; CHECK-NEXT: [[B:%.*]] = load i64, ptr [[B_I64:%.*]], align 8
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[A_ADDR_03:%.*]] = phi float* [ [[INCDEC_PTR:%.*]], [[BB:%.*]] ], [ [[A]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[A_ADDR_03:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[BB:%.*]] ], [ [[A]], [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[B_ADDR_02:%.*]] = phi i64 [ [[ADD_INT:%.*]], [[BB]] ], [ [[B]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[TMP:%.*]] = inttoptr i64 [[B_ADDR_02]] to float*
-; CHECK-NEXT: [[PTRCMP:%.*]] = icmp ult float* [[TMP]], [[A_END]]
+; CHECK-NEXT: [[TMP:%.*]] = inttoptr i64 [[B_ADDR_02]] to ptr
+; CHECK-NEXT: [[PTRCMP:%.*]] = icmp ult ptr [[TMP]], [[A_END]]
; CHECK-NEXT: br i1 [[PTRCMP]], label [[FOR_END]], label [[BB]]
; CHECK: bb:
-; CHECK-NEXT: [[I1:%.*]] = load float, float* [[A]], align 4
+; CHECK-NEXT: [[I1:%.*]] = load float, ptr [[A]], align 4
; CHECK-NEXT: [[MUL_I:%.*]] = fmul float [[I1]], 4.200000e+01
-; CHECK-NEXT: store float [[MUL_I]], float* [[A_ADDR_03]], align 4
-; CHECK-NEXT: [[ADD:%.*]] = getelementptr inbounds float, float* [[A]], i64 1
-; CHECK-NEXT: [[ADD_INT]] = ptrtoint float* [[ADD]] to i64
-; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, float* [[A_ADDR_03]], i64 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult float* [[INCDEC_PTR]], [[A_END]]
+; CHECK-NEXT: store float [[MUL_I]], ptr [[A_ADDR_03]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = getelementptr inbounds float, ptr [[A]], i64 1
+; CHECK-NEXT: [[ADD_INT]] = ptrtoint ptr [[ADD]] to i64
+; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, ptr [[A_ADDR_03]], i64 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult ptr [[INCDEC_PTR]], [[A_END]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
- %cmp1 = icmp ult float* %a, %a_end
+ %cmp1 = icmp ult ptr %a, %a_end
br i1 %cmp1, label %for.body.preheader, label %for.end
for.body.preheader: ; preds = %entry
- %b = load i64, i64* %b.i64, align 8
+ %b = load i64, ptr %b.i64, align 8
br label %for.body
for.body: ; preds = %for.body, %for.body.preheader
- %a.addr.03 = phi float* [ %incdec.ptr, %bb ], [ %a, %for.body.preheader ]
+ %a.addr.03 = phi ptr [ %incdec.ptr, %bb ], [ %a, %for.body.preheader ]
%b.addr.02 = phi i64 [ %add.int, %bb ], [ %b, %for.body.preheader ]
- %tmp = inttoptr i64 %b.addr.02 to float*
- %ptrcmp = icmp ult float* %tmp, %a_end
+ %tmp = inttoptr i64 %b.addr.02 to ptr
+ %ptrcmp = icmp ult ptr %tmp, %a_end
br i1 %ptrcmp, label %for.end, label %bb
bb:
- %i1 = load float, float* %a, align 4
+ %i1 = load float, ptr %a, align 4
%mul.i = fmul float %i1, 4.200000e+01
- store float %mul.i, float* %a.addr.03, align 4
- %add = getelementptr inbounds float, float* %a, i64 1
- %add.int = ptrtoint float* %add to i64
- %incdec.ptr = getelementptr inbounds float, float* %a.addr.03, i64 1
- %cmp = icmp ult float* %incdec.ptr, %a_end
+ store float %mul.i, ptr %a.addr.03, align 4
+ %add = getelementptr inbounds float, ptr %a, i64 1
+ %add.int = ptrtoint ptr %add to i64
+ %incdec.ptr = getelementptr inbounds float, ptr %a.addr.03, i64 1
+ %cmp = icmp ult ptr %incdec.ptr, %a_end
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body, %entry
ret void
}
-define void @test2(float* %a, float* readnone %a_end, float** %b.float) {
+define void @test2(ptr %a, ptr readnone %a_end, ptr %b.float) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CMP1:%.*]] = icmp ult float* [[A:%.*]], [[A_END:%.*]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ult ptr [[A:%.*]], [[A_END:%.*]]
; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_END:%.*]]
; CHECK: for.body.preheader:
-; CHECK-NEXT: [[B_I64:%.*]] = bitcast float** [[B_FLOAT:%.*]] to i64*
-; CHECK-NEXT: [[B:%.*]] = load i64, i64* [[B_I64]], align 8
-; CHECK-NEXT: [[B_PTR:%.*]] = inttoptr i64 [[B]] to float*
+; CHECK-NEXT: [[B:%.*]] = load i64, ptr [[B_FLOAT:%.*]], align 8
+; CHECK-NEXT: [[B_PTR:%.*]] = inttoptr i64 [[B]] to ptr
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[A_ADDR_03:%.*]] = phi float* [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[A]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[B_ADDR_02_PTR:%.*]] = phi float* [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[B_PTR]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[I1:%.*]] = load float, float* [[B_ADDR_02_PTR]], align 4
+; CHECK-NEXT: [[A_ADDR_03:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[A]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[B_ADDR_02_PTR:%.*]] = phi ptr [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[B_PTR]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[I1:%.*]] = load float, ptr [[B_ADDR_02_PTR]], align 4
; CHECK-NEXT: [[MUL_I:%.*]] = fmul float [[I1]], 4.200000e+01
-; CHECK-NEXT: store float [[MUL_I]], float* [[A_ADDR_03]], align 4
-; CHECK-NEXT: [[ADD]] = getelementptr inbounds float, float* [[B_ADDR_02_PTR]], i64 1
-; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, float* [[A_ADDR_03]], i64 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult float* [[INCDEC_PTR]], [[A_END]]
+; CHECK-NEXT: store float [[MUL_I]], ptr [[A_ADDR_03]], align 4
+; CHECK-NEXT: [[ADD]] = getelementptr inbounds float, ptr [[B_ADDR_02_PTR]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, ptr [[A_ADDR_03]], i64 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult ptr [[INCDEC_PTR]], [[A_END]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
- %cmp1 = icmp ult float* %a, %a_end
+ %cmp1 = icmp ult ptr %a, %a_end
br i1 %cmp1, label %for.body.preheader, label %for.end
for.body.preheader: ; preds = %entry
- %b.i64 = bitcast float** %b.float to i64*
- %b = load i64, i64* %b.i64, align 8
+ %b = load i64, ptr %b.float, align 8
br label %for.body
for.body: ; preds = %for.body, %for.body.preheader
- %a.addr.03 = phi float* [ %incdec.ptr, %for.body ], [ %a, %for.body.preheader ]
+ %a.addr.03 = phi ptr [ %incdec.ptr, %for.body ], [ %a, %for.body.preheader ]
%b.addr.02 = phi i64 [ %add.int, %for.body ], [ %b, %for.body.preheader ]
- %tmp = inttoptr i64 %b.addr.02 to float*
- %i1 = load float, float* %tmp, align 4
+ %tmp = inttoptr i64 %b.addr.02 to ptr
+ %i1 = load float, ptr %tmp, align 4
%mul.i = fmul float %i1, 4.200000e+01
- store float %mul.i, float* %a.addr.03, align 4
- %add = getelementptr inbounds float, float* %tmp, i64 1
- %add.int = ptrtoint float* %add to i64
- %incdec.ptr = getelementptr inbounds float, float* %a.addr.03, i64 1
- %cmp = icmp ult float* %incdec.ptr, %a_end
+ store float %mul.i, ptr %a.addr.03, align 4
+ %add = getelementptr inbounds float, ptr %tmp, i64 1
+ %add.int = ptrtoint ptr %add to i64
+ %incdec.ptr = getelementptr inbounds float, ptr %a.addr.03, i64 1
+ %cmp = icmp ult ptr %incdec.ptr, %a_end
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body, %entry
ret void
}
-define void @test3(float* %a, float* readnone %a_end, i8** %b.i8p) {
+define void @test3(ptr %a, ptr readnone %a_end, ptr %b.i8p) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CMP1:%.*]] = icmp ult float* [[A:%.*]], [[A_END:%.*]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ult ptr [[A:%.*]], [[A_END:%.*]]
; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_END:%.*]]
; CHECK: for.body.preheader:
-; CHECK-NEXT: [[B_I64:%.*]] = bitcast i8** [[B_I8P:%.*]] to i64*
-; CHECK-NEXT: [[B:%.*]] = load i64, i64* [[B_I64]], align 8
-; CHECK-NEXT: [[B_PTR:%.*]] = inttoptr i64 [[B]] to float*
+; CHECK-NEXT: [[B:%.*]] = load i64, ptr [[B_I8P:%.*]], align 8
+; CHECK-NEXT: [[B_PTR:%.*]] = inttoptr i64 [[B]] to ptr
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[A_ADDR_03:%.*]] = phi float* [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[A]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[B_ADDR_02_PTR:%.*]] = phi float* [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[B_PTR]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[I1:%.*]] = load float, float* [[B_ADDR_02_PTR]], align 4
+; CHECK-NEXT: [[A_ADDR_03:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[A]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[B_ADDR_02_PTR:%.*]] = phi ptr [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[B_PTR]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[I1:%.*]] = load float, ptr [[B_ADDR_02_PTR]], align 4
; CHECK-NEXT: [[MUL_I:%.*]] = fmul float [[I1]], 4.200000e+01
-; CHECK-NEXT: store float [[MUL_I]], float* [[A_ADDR_03]], align 4
-; CHECK-NEXT: [[ADD]] = getelementptr inbounds float, float* [[B_ADDR_02_PTR]], i64 1
-; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, float* [[A_ADDR_03]], i64 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult float* [[INCDEC_PTR]], [[A_END]]
+; CHECK-NEXT: store float [[MUL_I]], ptr [[A_ADDR_03]], align 4
+; CHECK-NEXT: [[ADD]] = getelementptr inbounds float, ptr [[B_ADDR_02_PTR]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, ptr [[A_ADDR_03]], i64 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult ptr [[INCDEC_PTR]], [[A_END]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
- %cmp1 = icmp ult float* %a, %a_end
+ %cmp1 = icmp ult ptr %a, %a_end
br i1 %cmp1, label %for.body.preheader, label %for.end
for.body.preheader: ; preds = %entry
- %b.i64 = bitcast i8** %b.i8p to i64*
- %b = load i64, i64* %b.i64, align 8
+ %b = load i64, ptr %b.i8p, align 8
br label %for.body
for.body: ; preds = %for.body, %for.body.preheader
- %a.addr.03 = phi float* [ %incdec.ptr, %for.body ], [ %a, %for.body.preheader ]
+ %a.addr.03 = phi ptr [ %incdec.ptr, %for.body ], [ %a, %for.body.preheader ]
%b.addr.02 = phi i64 [ %add.int, %for.body ], [ %b, %for.body.preheader ]
- %tmp = inttoptr i64 %b.addr.02 to float*
- %i1 = load float, float* %tmp, align 4
+ %tmp = inttoptr i64 %b.addr.02 to ptr
+ %i1 = load float, ptr %tmp, align 4
%mul.i = fmul float %i1, 4.200000e+01
- store float %mul.i, float* %a.addr.03, align 4
- %add = getelementptr inbounds float, float* %tmp, i64 1
- %add.int = ptrtoint float* %add to i64
- %incdec.ptr = getelementptr inbounds float, float* %a.addr.03, i64 1
- %cmp = icmp ult float* %incdec.ptr, %a_end
+ store float %mul.i, ptr %a.addr.03, align 4
+ %add = getelementptr inbounds float, ptr %tmp, i64 1
+ %add.int = ptrtoint ptr %add to i64
+ %incdec.ptr = getelementptr inbounds float, ptr %a.addr.03, i64 1
+ %cmp = icmp ult ptr %incdec.ptr, %a_end
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body, %entry
ret void
}
-define void @test4(float* %a, float* readnone %a_end, float** %b.float) {
+define void @test4(ptr %a, ptr readnone %a_end, ptr %b.float) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CMP1:%.*]] = icmp ult float* [[A:%.*]], [[A_END:%.*]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ult ptr [[A:%.*]], [[A_END:%.*]]
; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_END:%.*]]
; CHECK: for.body.preheader:
-; CHECK-NEXT: [[B_F:%.*]] = load float*, float** [[B_FLOAT:%.*]], align 8
+; CHECK-NEXT: [[B_F:%.*]] = load ptr, ptr [[B_FLOAT:%.*]], align 8
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[A_ADDR_03:%.*]] = phi float* [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[A]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[B_ADDR_02_IN:%.*]] = phi float* [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[B_F]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[I1:%.*]] = load float, float* [[B_ADDR_02_IN]], align 4
+; CHECK-NEXT: [[A_ADDR_03:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[A]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[B_ADDR_02_IN:%.*]] = phi ptr [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[B_F]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[I1:%.*]] = load float, ptr [[B_ADDR_02_IN]], align 4
; CHECK-NEXT: [[MUL_I:%.*]] = fmul float [[I1]], 4.200000e+01
-; CHECK-NEXT: store float [[MUL_I]], float* [[A_ADDR_03]], align 4
-; CHECK-NEXT: [[ADD]] = getelementptr inbounds float, float* [[B_ADDR_02_IN]], i64 1
-; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, float* [[A_ADDR_03]], i64 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult float* [[INCDEC_PTR]], [[A_END]]
+; CHECK-NEXT: store float [[MUL_I]], ptr [[A_ADDR_03]], align 4
+; CHECK-NEXT: [[ADD]] = getelementptr inbounds float, ptr [[B_ADDR_02_IN]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, ptr [[A_ADDR_03]], i64 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult ptr [[INCDEC_PTR]], [[A_END]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
- %cmp1 = icmp ult float* %a, %a_end
+ %cmp1 = icmp ult ptr %a, %a_end
br i1 %cmp1, label %for.body.preheader, label %for.end
for.body.preheader: ; preds = %entry
- %b.f = load float*, float** %b.float, align 8
- %b = ptrtoint float* %b.f to i64
+ %b.f = load ptr, ptr %b.float, align 8
+ %b = ptrtoint ptr %b.f to i64
br label %for.body
for.body: ; preds = %for.body, %for.body.preheader
- %a.addr.03 = phi float* [ %incdec.ptr, %for.body ], [ %a, %for.body.preheader ]
+ %a.addr.03 = phi ptr [ %incdec.ptr, %for.body ], [ %a, %for.body.preheader ]
%b.addr.02 = phi i64 [ %add.int, %for.body ], [ %b, %for.body.preheader ]
- %tmp = inttoptr i64 %b.addr.02 to float*
- %i1 = load float, float* %tmp, align 4
+ %tmp = inttoptr i64 %b.addr.02 to ptr
+ %i1 = load float, ptr %tmp, align 4
%mul.i = fmul float %i1, 4.200000e+01
- store float %mul.i, float* %a.addr.03, align 4
- %add = getelementptr inbounds float, float* %tmp, i64 1
- %add.int = ptrtoint float* %add to i64
- %incdec.ptr = getelementptr inbounds float, float* %a.addr.03, i64 1
- %cmp = icmp ult float* %incdec.ptr, %a_end
+ store float %mul.i, ptr %a.addr.03, align 4
+ %add = getelementptr inbounds float, ptr %tmp, i64 1
+ %add.int = ptrtoint ptr %add to i64
+ %incdec.ptr = getelementptr inbounds float, ptr %a.addr.03, i64 1
+ %cmp = icmp ult ptr %incdec.ptr, %a_end
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body, %entry
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-define void @test(float* %a, float* readnone %a_end, i64 %b) unnamed_addr {
+define void @test(ptr %a, ptr readnone %a_end, i64 %b) unnamed_addr {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CMP1:%.*]] = icmp ult float* [[A:%.*]], [[A_END:%.*]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ult ptr [[A:%.*]], [[A_END:%.*]]
; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_END:%.*]]
; CHECK: for.body.preheader:
-; CHECK-NEXT: [[B_FLOAT:%.*]] = inttoptr i64 [[B:%.*]] to float*
+; CHECK-NEXT: [[B_FLOAT:%.*]] = inttoptr i64 [[B:%.*]] to ptr
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[A_ADDR_03:%.*]] = phi float* [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[A]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[B_ADDR_FLOAT:%.*]] = phi float* [ [[B_ADDR_FLOAT_INC:%.*]], [[FOR_BODY]] ], [ [[B_FLOAT]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[L:%.*]] = load float, float* [[B_ADDR_FLOAT]], align 4
+; CHECK-NEXT: [[A_ADDR_03:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[A]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[B_ADDR_FLOAT:%.*]] = phi ptr [ [[B_ADDR_FLOAT_INC:%.*]], [[FOR_BODY]] ], [ [[B_FLOAT]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[L:%.*]] = load float, ptr [[B_ADDR_FLOAT]], align 4
; CHECK-NEXT: [[MUL_I:%.*]] = fmul float [[L]], 4.200000e+01
-; CHECK-NEXT: store float [[MUL_I]], float* [[A_ADDR_03]], align 4
-; CHECK-NEXT: [[B_ADDR_FLOAT_INC]] = getelementptr inbounds float, float* [[B_ADDR_FLOAT]], i64 1
-; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, float* [[A_ADDR_03]], i64 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult float* [[INCDEC_PTR]], [[A_END]]
+; CHECK-NEXT: store float [[MUL_I]], ptr [[A_ADDR_03]], align 4
+; CHECK-NEXT: [[B_ADDR_FLOAT_INC]] = getelementptr inbounds float, ptr [[B_ADDR_FLOAT]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, ptr [[A_ADDR_03]], i64 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult ptr [[INCDEC_PTR]], [[A_END]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
- %cmp1 = icmp ult float* %a, %a_end
+ %cmp1 = icmp ult ptr %a, %a_end
br i1 %cmp1, label %for.body.preheader, label %for.end
for.body.preheader: ; preds = %entry
- %b.float = inttoptr i64 %b to float*
+ %b.float = inttoptr i64 %b to ptr
br label %for.body
for.body: ; preds = %for.body.preheader, %for.body
- %a.addr.03 = phi float* [ %incdec.ptr, %for.body ], [ %a, %for.body.preheader ]
- %b.addr.float = phi float* [ %b.addr.float.inc, %for.body ], [ %b.float, %for.body.preheader ]
+ %a.addr.03 = phi ptr [ %incdec.ptr, %for.body ], [ %a, %for.body.preheader ]
+ %b.addr.float = phi ptr [ %b.addr.float.inc, %for.body ], [ %b.float, %for.body.preheader ]
%b.addr.i64 = phi i64 [ %b.addr.i64.inc, %for.body ], [ %b, %for.body.preheader ]
- %l = load float, float* %b.addr.float, align 4
+ %l = load float, ptr %b.addr.float, align 4
%mul.i = fmul float %l, 4.200000e+01
- store float %mul.i, float* %a.addr.03, align 4
- %b.addr.float.2 = inttoptr i64 %b.addr.i64 to float*
- %b.addr.float.inc = getelementptr inbounds float, float* %b.addr.float.2, i64 1
- %b.addr.i64.inc = ptrtoint float* %b.addr.float.inc to i64
- %incdec.ptr = getelementptr inbounds float, float* %a.addr.03, i64 1
- %cmp = icmp ult float* %incdec.ptr, %a_end
+ store float %mul.i, ptr %a.addr.03, align 4
+ %b.addr.float.2 = inttoptr i64 %b.addr.i64 to ptr
+ %b.addr.float.inc = getelementptr inbounds float, ptr %b.addr.float.2, i64 1
+ %b.addr.i64.inc = ptrtoint ptr %b.addr.float.inc to i64
+ %incdec.ptr = getelementptr inbounds float, ptr %a.addr.03, i64 1
+ %cmp = icmp ult ptr %incdec.ptr, %a_end
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body, %entry
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-define void @test(float* %a, float* readnone %a_end, i64 %b, float* %bf) unnamed_addr {
+define void @test(ptr %a, ptr readnone %a_end, i64 %b, ptr %bf) unnamed_addr {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CMP1:%.*]] = icmp ult float* [[A:%.*]], [[A_END:%.*]]
-; CHECK-NEXT: [[B_FLOAT:%.*]] = inttoptr i64 [[B:%.*]] to float*
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ult ptr [[A:%.*]], [[A_END:%.*]]
+; CHECK-NEXT: [[B_FLOAT:%.*]] = inttoptr i64 [[B:%.*]] to ptr
; CHECK-NEXT: br i1 [[CMP1]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: br label [[FOR_BODY_PREHEADER:%.*]]
; CHECK: bb2:
-; CHECK-NEXT: [[BFI:%.*]] = ptrtoint float* [[BF:%.*]] to i64
+; CHECK-NEXT: [[BFI:%.*]] = ptrtoint ptr [[BF:%.*]] to i64
; CHECK-NEXT: br label [[FOR_BODY_PREHEADER]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[B_PHI:%.*]] = phi i64 [ [[B]], [[BB1]] ], [ [[BFI]], [[BB2]] ]
-; CHECK-NEXT: [[B_PHI_PTR:%.*]] = inttoptr i64 [[B_PHI]] to float*
+; CHECK-NEXT: [[B_PHI_PTR:%.*]] = inttoptr i64 [[B_PHI]] to ptr
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[A_ADDR_03:%.*]] = phi float* [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[A]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[B_ADDR_FLOAT:%.*]] = phi float* [ [[B_ADDR_FLOAT_INC:%.*]], [[FOR_BODY]] ], [ [[B_FLOAT]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[B_ADDR_I64_PTR:%.*]] = phi float* [ [[B_ADDR_FLOAT_INC]], [[FOR_BODY]] ], [ [[B_PHI_PTR]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[L:%.*]] = load float, float* [[B_ADDR_FLOAT]], align 4
+; CHECK-NEXT: [[A_ADDR_03:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[A]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[B_ADDR_FLOAT:%.*]] = phi ptr [ [[B_ADDR_FLOAT_INC:%.*]], [[FOR_BODY]] ], [ [[B_FLOAT]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[B_ADDR_I64_PTR:%.*]] = phi ptr [ [[B_ADDR_FLOAT_INC]], [[FOR_BODY]] ], [ [[B_PHI_PTR]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[L:%.*]] = load float, ptr [[B_ADDR_FLOAT]], align 4
; CHECK-NEXT: [[MUL_I:%.*]] = fmul float [[L]], 4.200000e+01
-; CHECK-NEXT: store float [[MUL_I]], float* [[A_ADDR_03]], align 4
-; CHECK-NEXT: [[B_ADDR_FLOAT_INC]] = getelementptr inbounds float, float* [[B_ADDR_I64_PTR]], i64 1
-; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, float* [[A_ADDR_03]], i64 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult float* [[INCDEC_PTR]], [[A_END]]
+; CHECK-NEXT: store float [[MUL_I]], ptr [[A_ADDR_03]], align 4
+; CHECK-NEXT: [[B_ADDR_FLOAT_INC]] = getelementptr inbounds float, ptr [[B_ADDR_I64_PTR]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, ptr [[A_ADDR_03]], i64 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult ptr [[INCDEC_PTR]], [[A_END]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
- %cmp1 = icmp ult float* %a, %a_end
- %b.float = inttoptr i64 %b to float*
+ %cmp1 = icmp ult ptr %a, %a_end
+ %b.float = inttoptr i64 %b to ptr
br i1 %cmp1, label %bb1, label %bb2
bb1:
br label %for.body.preheader
bb2:
- %bfi = ptrtoint float* %bf to i64
+ %bfi = ptrtoint ptr %bf to i64
br label %for.body.preheader
for.body.preheader: ; preds = %bb1, %bb2
%b.phi = phi i64 [ %b, %bb1 ], [ %bfi, %bb2 ]
br label %for.body
for.body: ; preds = %for.body.preheader, %for.body
- %a.addr.03 = phi float* [ %incdec.ptr, %for.body ], [ %a, %for.body.preheader ]
- %b.addr.float = phi float* [ %b.addr.float.inc, %for.body ], [ %b.float, %for.body.preheader ]
+ %a.addr.03 = phi ptr [ %incdec.ptr, %for.body ], [ %a, %for.body.preheader ]
+ %b.addr.float = phi ptr [ %b.addr.float.inc, %for.body ], [ %b.float, %for.body.preheader ]
%b.addr.i64 = phi i64 [ %b.addr.i64.inc, %for.body ], [ %b.phi, %for.body.preheader ]
- %l = load float, float* %b.addr.float, align 4
+ %l = load float, ptr %b.addr.float, align 4
%mul.i = fmul float %l, 4.200000e+01
- store float %mul.i, float* %a.addr.03, align 4
- %b.addr.float.2 = inttoptr i64 %b.addr.i64 to float*
- %b.addr.float.inc = getelementptr inbounds float, float* %b.addr.float.2, i64 1
- %b.addr.i64.inc = ptrtoint float* %b.addr.float.inc to i64
- %incdec.ptr = getelementptr inbounds float, float* %a.addr.03, i64 1
- %cmp = icmp ult float* %incdec.ptr, %a_end
+ store float %mul.i, ptr %a.addr.03, align 4
+ %b.addr.float.2 = inttoptr i64 %b.addr.i64 to ptr
+ %b.addr.float.inc = getelementptr inbounds float, ptr %b.addr.float.2, i64 1
+ %b.addr.i64.inc = ptrtoint ptr %b.addr.float.inc to i64
+ %incdec.ptr = getelementptr inbounds float, ptr %a.addr.03, i64 1
+ %cmp = icmp ult ptr %incdec.ptr, %a_end
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body, %entry
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-define void @test(float* %a, float* readnone %a_end, i64 %b, float* %bf) unnamed_addr {
+define void @test(ptr %a, ptr readnone %a_end, i64 %b, ptr %bf) unnamed_addr {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CMP1:%.*]] = icmp ult float* [[A:%.*]], [[A_END:%.*]]
-; CHECK-NEXT: [[B_FLOAT:%.*]] = inttoptr i64 [[B:%.*]] to float*
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ult ptr [[A:%.*]], [[A_END:%.*]]
+; CHECK-NEXT: [[B_FLOAT:%.*]] = inttoptr i64 [[B:%.*]] to ptr
; CHECK-NEXT: br i1 [[CMP1]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: br label [[FOR_BODY_PREHEADER:%.*]]
; CHECK: bb2:
-; CHECK-NEXT: [[BFI:%.*]] = ptrtoint float* [[BF:%.*]] to i64
+; CHECK-NEXT: [[BFI:%.*]] = ptrtoint ptr [[BF:%.*]] to i64
; CHECK-NEXT: br label [[FOR_BODY_PREHEADER]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[B_PHI:%.*]] = phi i64 [ [[B]], [[BB1]] ], [ [[BFI]], [[BB2]] ]
-; CHECK-NEXT: [[B_PHI_PTR:%.*]] = inttoptr i64 [[B_PHI]] to float*
+; CHECK-NEXT: [[B_PHI_PTR:%.*]] = inttoptr i64 [[B_PHI]] to ptr
; CHECK-NEXT: switch i64 [[B]], label [[FOR_BODY:%.*]] [
; CHECK-NEXT: i64 1, label [[FOR_BODY]]
; CHECK-NEXT: ]
; CHECK: for.body:
-; CHECK-NEXT: [[A_ADDR_03:%.*]] = phi float* [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[A]], [[FOR_BODY_PREHEADER]] ], [ [[A]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[B_ADDR_FLOAT:%.*]] = phi float* [ [[B_ADDR_FLOAT_INC:%.*]], [[FOR_BODY]] ], [ [[B_FLOAT]], [[FOR_BODY_PREHEADER]] ], [ [[B_FLOAT]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[B_ADDR_I64_PTR:%.*]] = phi float* [ [[B_ADDR_FLOAT_INC]], [[FOR_BODY]] ], [ [[B_PHI_PTR]], [[FOR_BODY_PREHEADER]] ], [ [[B_PHI_PTR]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[L:%.*]] = load float, float* [[B_ADDR_FLOAT]], align 4
+; CHECK-NEXT: [[A_ADDR_03:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[A]], [[FOR_BODY_PREHEADER]] ], [ [[A]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[B_ADDR_FLOAT:%.*]] = phi ptr [ [[B_ADDR_FLOAT_INC:%.*]], [[FOR_BODY]] ], [ [[B_FLOAT]], [[FOR_BODY_PREHEADER]] ], [ [[B_FLOAT]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[B_ADDR_I64_PTR:%.*]] = phi ptr [ [[B_ADDR_FLOAT_INC]], [[FOR_BODY]] ], [ [[B_PHI_PTR]], [[FOR_BODY_PREHEADER]] ], [ [[B_PHI_PTR]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[L:%.*]] = load float, ptr [[B_ADDR_FLOAT]], align 4
; CHECK-NEXT: [[MUL_I:%.*]] = fmul float [[L]], 4.200000e+01
-; CHECK-NEXT: store float [[MUL_I]], float* [[A_ADDR_03]], align 4
-; CHECK-NEXT: [[B_ADDR_FLOAT_INC]] = getelementptr inbounds float, float* [[B_ADDR_I64_PTR]], i64 1
-; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, float* [[A_ADDR_03]], i64 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult float* [[INCDEC_PTR]], [[A_END]]
+; CHECK-NEXT: store float [[MUL_I]], ptr [[A_ADDR_03]], align 4
+; CHECK-NEXT: [[B_ADDR_FLOAT_INC]] = getelementptr inbounds float, ptr [[B_ADDR_I64_PTR]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, ptr [[A_ADDR_03]], i64 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult ptr [[INCDEC_PTR]], [[A_END]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
- %cmp1 = icmp ult float* %a, %a_end
- %b.float = inttoptr i64 %b to float*
+ %cmp1 = icmp ult ptr %a, %a_end
+ %b.float = inttoptr i64 %b to ptr
br i1 %cmp1, label %bb1, label %bb2
bb1:
br label %for.body.preheader
bb2:
- %bfi = ptrtoint float* %bf to i64
+ %bfi = ptrtoint ptr %bf to i64
br label %for.body.preheader
for.body.preheader: ; preds = %bb1, %bb2
%b.phi = phi i64 [ %b, %bb1 ], [ %bfi, %bb2 ]
switch i64 %b, label %for.body [
i64 1, label %for.body
]
for.body: ; preds = %for.body.preheader, %for.body
- %a.addr.03 = phi float* [ %incdec.ptr, %for.body ], [ %a, %for.body.preheader ], [%a, %for.body.preheader]
- %b.addr.float = phi float* [ %b.addr.float.inc, %for.body ], [ %b.float, %for.body.preheader ], [%b.float, %for.body.preheader]
+ %a.addr.03 = phi ptr [ %incdec.ptr, %for.body ], [ %a, %for.body.preheader ], [%a, %for.body.preheader]
+ %b.addr.float = phi ptr [ %b.addr.float.inc, %for.body ], [ %b.float, %for.body.preheader ], [%b.float, %for.body.preheader]
%b.addr.i64 = phi i64 [ %b.addr.i64.inc, %for.body ], [ %b.phi, %for.body.preheader ], [ %b.phi, %for.body.preheader]
- %l = load float, float* %b.addr.float, align 4
+ %l = load float, ptr %b.addr.float, align 4
%mul.i = fmul float %l, 4.200000e+01
- store float %mul.i, float* %a.addr.03, align 4
- %b.addr.float.2 = inttoptr i64 %b.addr.i64 to float*
- %b.addr.float.inc = getelementptr inbounds float, float* %b.addr.float.2, i64 1
- %b.addr.i64.inc = ptrtoint float* %b.addr.float.inc to i64
- %incdec.ptr = getelementptr inbounds float, float* %a.addr.03, i64 1
- %cmp = icmp ult float* %incdec.ptr, %a_end
+ store float %mul.i, ptr %a.addr.03, align 4
+ %b.addr.float.2 = inttoptr i64 %b.addr.i64 to ptr
+ %b.addr.float.inc = getelementptr inbounds float, ptr %b.addr.float.2, i64 1
+ %b.addr.i64.inc = ptrtoint ptr %b.addr.float.inc to i64
+ %incdec.ptr = getelementptr inbounds float, ptr %a.addr.03, i64 1
+ %cmp = icmp ult ptr %incdec.ptr, %a_end
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body, %entry
ret void
}
; no crash
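; Reduced regression test: instcombine only has to get through this EH-heavy
; sanitize_memory function without crashing; no particular output is checked.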
%A = type { %B }
-%B = type { %C *}
-%C = type <{ i32 (...)**, i32, [4 x i8] }>
+%B = type { ptr }
+%C = type <{ ptr, i32, [4 x i8] }>
$foo = comdat any
declare i32 @__gxx_personality_v0(...)
; Function Attrs: inlinehint sanitize_memory uwtable
-define void @foo(i1 %c1) local_unnamed_addr #0 comdat align 2 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @foo(i1 %c1) local_unnamed_addr #0 comdat align 2 personality ptr @__gxx_personality_v0 {
entry:
- %0 = load %C*, %C** getelementptr inbounds (%A, %A* @bar, i64 0, i32 0, i32 0), align 8
- %1 = ptrtoint %C* %0 to i64
- %count.i.i.i23 = getelementptr inbounds %C, %C* %0, i64 0, i32 1
- store i32 0, i32* %count.i.i.i23, align 8
- %2 = invoke i8* @_Znwm() #3
+ %0 = load ptr, ptr @bar, align 8
+ %1 = ptrtoint ptr %0 to i64
+ %count.i.i.i23 = getelementptr inbounds %C, ptr %0, i64 0, i32 1
+ store i32 0, ptr %count.i.i.i23, align 8
+ %2 = invoke ptr @_Znwm() #3
to label %invoke.cont unwind label %lpad
invoke.cont: ; preds = %entry
- %call.i25 = invoke i8* @_Znwm() #3
+ %call.i25 = invoke ptr @_Znwm() #3
to label %call.i.noexc unwind label %lpad4
call.i.noexc: ; preds = %invoke.cont
to label %invoke.cont5 unwind label %lpad.i
lpad.i: ; preds = %call.i.noexc
- %3 = landingpad { i8*, i32 }
+ %3 = landingpad { ptr, i32 }
cleanup
br label %ehcleanup
invoke.cont5: ; preds = %call.i.noexc
- %4 = ptrtoint i8* %call.i25 to i64
+ %4 = ptrtoint ptr %call.i25 to i64
invoke void @scale()
to label %invoke.cont16 unwind label %lpad15
invoke.cont16: ; preds = %invoke.cont5
ret void
lpad: ; preds = %entry
- %5 = landingpad { i8*, i32 }
+ %5 = landingpad { ptr, i32 }
cleanup
unreachable
lpad4: ; preds = %invoke.cont
- %6 = landingpad { i8*, i32 }
+ %6 = landingpad { ptr, i32 }
cleanup
unreachable
ehcleanup: ; preds = %lpad.i
br label %ehcleanup21
lpad15: ; preds = %invoke.cont5
- %7 = landingpad { i8*, i32 }
+ %7 = landingpad { ptr, i32 }
cleanup
br label %ehcleanup21
ehcleanup21: ; preds = %lpad15, %ehcleanup
%actual_other.sroa.0.0 = phi i64 [ %1, %ehcleanup ], [ %4, %lpad15 ]
- %8 = inttoptr i64 %actual_other.sroa.0.0 to %C*
+ %8 = inttoptr i64 %actual_other.sroa.0.0 to ptr
br i1 %c1, label %_ZN4CGAL6HandleD2Ev.exit, label %land.lhs.true.i
land.lhs.true.i: ; preds = %ehcleanup21
- %count.i = getelementptr inbounds %C, %C* %8, i64 0, i32 1
- %9 = load i32, i32* %count.i, align 8
+ %count.i = getelementptr inbounds %C, ptr %8, i64 0, i32 1
+ %9 = load i32, ptr %count.i, align 8
unreachable
_ZN4CGAL6HandleD2Ev.exit: ; preds = %ehcleanup21
- resume { i8*, i32 } undef
+ resume { ptr, i32 } undef
}
; Function Attrs: nobuiltin
-declare noalias nonnull i8* @_Znwm() local_unnamed_addr #1
+declare noalias nonnull ptr @_Znwm() local_unnamed_addr #1
; Function Attrs: sanitize_memory uwtable
declare void @scale() local_unnamed_addr #2 align 2
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
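; In @matching_phi the integer phi mirrors the pointer phi operand-for-operand,
; so the inttoptr of the integer phi can simply reuse the existing pointer phi.
; In @no_matching_phi the incoming values are crossed between the two phis, so
; the casts are expected to remain.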
-define void @matching_phi(i64 %a, float* %b, i1 %cond) {
+define void @matching_phi(i64 %a, ptr %b, i1 %cond) {
; CHECK-LABEL: @matching_phi(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[COND:%.*]], label [[BB2:%.*]], label [[BB1:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[ADDB:%.*]] = getelementptr inbounds float, float* [[B:%.*]], i64 2
+; CHECK-NEXT: [[ADDB:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 2
; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb2:
; CHECK-NEXT: [[ADD_INT:%.*]] = add i64 [[A:%.*]], 1
-; CHECK-NEXT: [[ADD:%.*]] = inttoptr i64 [[ADD_INT]] to float*
-; CHECK-NEXT: store float 1.000000e+01, float* [[ADD]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = inttoptr i64 [[ADD_INT]] to ptr
+; CHECK-NEXT: store float 1.000000e+01, ptr [[ADD]], align 4
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
-; CHECK-NEXT: [[A_ADDR_03:%.*]] = phi float* [ [[ADDB]], [[BB1]] ], [ [[ADD]], [[BB2]] ]
-; CHECK-NEXT: [[I1:%.*]] = load float, float* [[A_ADDR_03]], align 4
+; CHECK-NEXT: [[A_ADDR_03:%.*]] = phi ptr [ [[ADDB]], [[BB1]] ], [ [[ADD]], [[BB2]] ]
+; CHECK-NEXT: [[I1:%.*]] = load float, ptr [[A_ADDR_03]], align 4
; CHECK-NEXT: [[MUL_I:%.*]] = fmul float [[I1]], 4.200000e+01
-; CHECK-NEXT: store float [[MUL_I]], float* [[A_ADDR_03]], align 4
+; CHECK-NEXT: store float [[MUL_I]], ptr [[A_ADDR_03]], align 4
; CHECK-NEXT: ret void
;
entry:
%cmp1 = icmp eq i1 %cond, 0
%add.int = add i64 %a, 1
- %add = inttoptr i64 %add.int to float *
+ %add = inttoptr i64 %add.int to ptr
- %addb = getelementptr inbounds float, float* %b, i64 2
- %addb.int = ptrtoint float* %addb to i64
+ %addb = getelementptr inbounds float, ptr %b, i64 2
+ %addb.int = ptrtoint ptr %addb to i64
br i1 %cmp1, label %bb1, label %bb2
bb1:
br label %bb3
bb2:
- store float 1.0e+01, float* %add, align 4
+ store float 1.0e+01, ptr %add, align 4
br label %bb3
bb3:
- %a.addr.03 = phi float* [ %addb, %bb1 ], [ %add, %bb2 ]
+ %a.addr.03 = phi ptr [ %addb, %bb1 ], [ %add, %bb2 ]
%b.addr.02 = phi i64 [ %addb.int, %bb1 ], [ %add.int, %bb2 ]
- %i0 = inttoptr i64 %b.addr.02 to float*
- %i1 = load float, float* %i0, align 4
+ %i0 = inttoptr i64 %b.addr.02 to ptr
+ %i1 = load float, ptr %i0, align 4
%mul.i = fmul float %i1, 4.200000e+01
- store float %mul.i, float* %a.addr.03, align 4
+ store float %mul.i, ptr %a.addr.03, align 4
ret void
}
-define void @no_matching_phi(i64 %a, float* %b, i1 %cond) {
+define void @no_matching_phi(i64 %a, ptr %b, i1 %cond) {
; CHECK-LABEL: @no_matching_phi(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD_INT:%.*]] = add i64 [[A:%.*]], 1
-; CHECK-NEXT: [[ADDB:%.*]] = getelementptr inbounds float, float* [[B:%.*]], i64 2
+; CHECK-NEXT: [[ADDB:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 2
; CHECK-NEXT: br i1 [[COND:%.*]], label [[B:%.*]], label [[A:%.*]]
; CHECK: A:
; CHECK-NEXT: br label [[C:%.*]]
; CHECK: B:
-; CHECK-NEXT: [[ADDB_INT:%.*]] = ptrtoint float* [[ADDB]] to i64
-; CHECK-NEXT: [[ADD:%.*]] = inttoptr i64 [[ADD_INT]] to float*
-; CHECK-NEXT: store float 1.000000e+01, float* [[ADD]], align 4
+; CHECK-NEXT: [[ADDB_INT:%.*]] = ptrtoint ptr [[ADDB]] to i64
+; CHECK-NEXT: [[ADD:%.*]] = inttoptr i64 [[ADD_INT]] to ptr
+; CHECK-NEXT: store float 1.000000e+01, ptr [[ADD]], align 4
; CHECK-NEXT: br label [[C]]
; CHECK: C:
-; CHECK-NEXT: [[A_ADDR_03:%.*]] = phi float* [ [[ADDB]], [[A]] ], [ [[ADD]], [[B]] ]
+; CHECK-NEXT: [[A_ADDR_03:%.*]] = phi ptr [ [[ADDB]], [[A]] ], [ [[ADD]], [[B]] ]
; CHECK-NEXT: [[B_ADDR_02:%.*]] = phi i64 [ [[ADD_INT]], [[A]] ], [ [[ADDB_INT]], [[B]] ]
-; CHECK-NEXT: [[I0:%.*]] = inttoptr i64 [[B_ADDR_02]] to float*
-; CHECK-NEXT: [[I1:%.*]] = load float, float* [[I0]], align 4
+; CHECK-NEXT: [[I0:%.*]] = inttoptr i64 [[B_ADDR_02]] to ptr
+; CHECK-NEXT: [[I1:%.*]] = load float, ptr [[I0]], align 4
; CHECK-NEXT: [[MUL_I:%.*]] = fmul float [[I1]], 4.200000e+01
-; CHECK-NEXT: store float [[MUL_I]], float* [[A_ADDR_03]], align 4
+; CHECK-NEXT: store float [[MUL_I]], ptr [[A_ADDR_03]], align 4
; CHECK-NEXT: ret void
;
entry:
%cmp1 = icmp eq i1 %cond, 0
%add.int = add i64 %a, 1
- %add = inttoptr i64 %add.int to float *
+ %add = inttoptr i64 %add.int to ptr
- %addb = getelementptr inbounds float, float* %b, i64 2
- %addb.int = ptrtoint float* %addb to i64
+ %addb = getelementptr inbounds float, ptr %b, i64 2
+ %addb.int = ptrtoint ptr %addb to i64
br i1 %cmp1, label %A, label %B
A:
br label %C
B:
- store float 1.0e+01, float* %add, align 4
+ store float 1.0e+01, ptr %add, align 4
br label %C
C:
- %a.addr.03 = phi float* [ %addb, %A ], [ %add, %B ]
+ %a.addr.03 = phi ptr [ %addb, %A ], [ %add, %B ]
%b.addr.02 = phi i64 [ %addb.int, %B ], [ %add.int, %A ]
- %i0 = inttoptr i64 %b.addr.02 to float*
- %i1 = load float, float* %i0, align 4
+ %i0 = inttoptr i64 %b.addr.02 to ptr
+ %i1 = load float, ptr %i0, align 4
%mul.i = fmul float %i1, 4.200000e+01
- store float %mul.i, float* %a.addr.03, align 4
+ store float %mul.i, ptr %a.addr.03, align 4
ret void
}
target datalayout = "e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-pc-windows-msvc"
-%struct.intrusive_ptr = type { %struct.C* }
+%struct.intrusive_ptr = type { ptr }
%struct.C = type { %struct.intrusive_ref_counter }
%struct.intrusive_ref_counter = type { i32 }
-declare dso_local %struct.C* @"?mk@@YAPEAUC@@XZ"() #3
-declare dso_local void @"?intrusive_ptr_release@@YAXPEBUintrusive_ref_counter@@@Z"(%struct.intrusive_ref_counter*) #3
+declare dso_local ptr @"?mk@@YAPEAUC@@XZ"() #3
+declare dso_local void @"?intrusive_ptr_release@@YAXPEBUintrusive_ref_counter@@@Z"(ptr) #3
declare dso_local void @"?terminate@@YAXXZ"()
declare dso_local i32 @__CxxFrameHandler3(...)
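; Windows EH variant (catchswitch/cleanuppad funclets) of the int-to-pointer
; phi situation above; per the trailing CHECK-LABEL, the test only requires
; that instcombine emit the function without crashing.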
-define dso_local void @"?crash@@YAXXZ"() local_unnamed_addr #0 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+define dso_local void @"?crash@@YAXXZ"() local_unnamed_addr #0 personality ptr @__CxxFrameHandler3 {
entry:
- %call1 = invoke %struct.C* @"?mk@@YAPEAUC@@XZ"()
+ %call1 = invoke ptr @"?mk@@YAPEAUC@@XZ"()
to label %invoke.cont2 unwind label %catch.dispatch
invoke.cont2: ; preds = %entry
- %0 = ptrtoint %struct.C* %call1 to i64
- %call5 = invoke %struct.C* @"?mk@@YAPEAUC@@XZ"()
+ %0 = ptrtoint ptr %call1 to i64
+ %call5 = invoke ptr @"?mk@@YAPEAUC@@XZ"()
to label %try.cont unwind label %catch.dispatch
catch.dispatch: ; preds = %invoke.cont2, %entry
%1 = catchswitch within none [label %catch] unwind label %ehcleanup
catch: ; preds = %catch.dispatch
- %2 = catchpad within %1 [i8* null, i32 64, i8* null]
+ %2 = catchpad within %1 [ptr null, i32 64, ptr null]
catchret from %2 to label %try.cont
try.cont: ; preds = %invoke.cont2, %catch
%a.sroa.0.1 = phi i64 [ %0, %invoke.cont2 ], [ %a.sroa.0.0, %catch ]
- %3 = inttoptr i64 %a.sroa.0.1 to %struct.C*
- %tobool.i3 = icmp eq %struct.C* %3, null
+ %3 = inttoptr i64 %a.sroa.0.1 to ptr
+ %tobool.i3 = icmp eq ptr %3, null
br i1 %tobool.i3, label %"??1?$intrusive_ptr@UC@@@@QEAA@XZ.exit6", label %if.then.i4
if.then.i4: ; preds = %try.cont
- %4 = getelementptr %struct.C, %struct.C* %3, i64 0, i32 0
- invoke void @"?intrusive_ptr_release@@YAXPEBUintrusive_ref_counter@@@Z"(%struct.intrusive_ref_counter* %4)
+ invoke void @"?intrusive_ptr_release@@YAXPEBUintrusive_ref_counter@@@Z"(ptr %3)
to label %"??1?$intrusive_ptr@UC@@@@QEAA@XZ.exit6" unwind label %terminate.i5
terminate.i5: ; preds = %if.then.i4
- %5 = cleanuppad within none []
- call void @"?terminate@@YAXXZ"() #4 [ "funclet"(token %5) ]
+ %4 = cleanuppad within none []
+ call void @"?terminate@@YAXXZ"() #4 [ "funclet"(token %4) ]
unreachable
"??1?$intrusive_ptr@UC@@@@QEAA@XZ.exit6": ; preds = %try.cont, %if.then.i4
ret void
ehcleanup: ; preds = %catch.dispatch
- %6 = cleanuppad within none []
- %7 = inttoptr i64 %a.sroa.0.0 to %struct.C*
- %tobool.i = icmp eq %struct.C* %7, null
+ %5 = cleanuppad within none []
+ %6 = inttoptr i64 %a.sroa.0.0 to ptr
+ %tobool.i = icmp eq ptr %6, null
br i1 %tobool.i, label %"??1?$intrusive_ptr@UC@@@@QEAA@XZ.exit", label %if.then.i
if.then.i: ; preds = %ehcleanup
- %8 = getelementptr %struct.C, %struct.C* %7, i64 0, i32 0
- invoke void @"?intrusive_ptr_release@@YAXPEBUintrusive_ref_counter@@@Z"(%struct.intrusive_ref_counter* %8) [ "funclet"(token %6) ]
+ invoke void @"?intrusive_ptr_release@@YAXPEBUintrusive_ref_counter@@@Z"(ptr %6) [ "funclet"(token %5) ]
to label %"??1?$intrusive_ptr@UC@@@@QEAA@XZ.exit" unwind label %terminate.i
terminate.i: ; preds = %if.then.i
- %9 = cleanuppad within %6 []
- call void @"?terminate@@YAXXZ"() #4 [ "funclet"(token %9) ]
+ %7 = cleanuppad within %5 []
+ call void @"?terminate@@YAXXZ"() #4 [ "funclet"(token %7) ]
unreachable
"??1?$intrusive_ptr@UC@@@@QEAA@XZ.exit": ; preds = %ehcleanup, %if.then.i
- cleanupret from %6 unwind to caller
+ cleanupret from %5 unwind to caller
}
; CHECK-LABEL: define dso_local void @"?crash@@YAXXZ"
declare double @llvm.rint.f64(double %Val) nounwind readonly
declare double @llvm.nearbyint.f64(double %Val) nounwind readonly
declare double @llvm.powi.f64.i32(double, i32)
declare double @llvm.powi.f64.i16(double, i16)
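; powi with a small constant exponent is expanded rather than kept as a call:
; exponent -1 becomes a reciprocal fdiv (the calls are marked fast), and
; exponent 2 becomes a self-multiply. Both i32 and i16 exponents are covered.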
-define void @powi(double %V, double *%P) {
+define void @powi(double %V, ptr %P) {
; CHECK-LABEL: @powi(
; CHECK-NEXT: [[A:%.*]] = fdiv fast double 1.000000e+00, [[V:%.*]]
-; CHECK-NEXT: store volatile double [[A]], double* [[P:%.*]], align 8
+; CHECK-NEXT: store volatile double [[A]], ptr [[P:%.*]], align 8
; CHECK-NEXT: [[D:%.*]] = fmul nnan double [[V]], [[V]]
-; CHECK-NEXT: store volatile double [[D]], double* [[P]], align 8
+; CHECK-NEXT: store volatile double [[D]], ptr [[P]], align 8
; CHECK-NEXT: [[A2:%.*]] = fdiv fast double 1.000000e+00, [[V]]
-; CHECK-NEXT: store volatile double [[A2]], double* [[P]], align 8
+; CHECK-NEXT: store volatile double [[A2]], ptr [[P]], align 8
; CHECK-NEXT: [[D2:%.*]] = fmul nnan double [[V]], [[V]]
-; CHECK-NEXT: store volatile double [[D2]], double* [[P]], align 8
+; CHECK-NEXT: store volatile double [[D2]], ptr [[P]], align 8
; CHECK-NEXT: ret void
;
%A = tail call fast double @llvm.powi.f64.i32(double %V, i32 -1) nounwind
- store volatile double %A, double* %P
+ store volatile double %A, ptr %P
%D = tail call nnan double @llvm.powi.f64.i32(double %V, i32 2) nounwind
- store volatile double %D, double* %P
+ store volatile double %D, ptr %P
%A2 = tail call fast double @llvm.powi.f64.i16(double %V, i16 -1) nounwind
- store volatile double %A2, double* %P
+ store volatile double %A2, ptr %P
%D2 = tail call nnan double @llvm.powi.f64.i16(double %V, i16 2) nounwind
- store volatile double %D2, double* %P
+ store volatile double %D2, ptr %P
ret void
}
ret <2 x i32> %s
}
declare double @llvm.cos.f64(double)
declare double @llvm.sin.f64(double)
declare double @llvm.floor.f64(double)
declare double @llvm.ceil.f64(double)
declare double @llvm.trunc.f64(double)
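; The tests below constant-fold unary FP intrinsics on literal arguments, so
; only the folded constant is stored: e.g. floor(-1.5) -> -2.0 while
; trunc(-1.5) -> -1.0, and rint/nearbyint round the .5 cases to even.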
-define void @cos(double *%P) {
+define void @cos(ptr %P) {
; CHECK-LABEL: @cos(
-; CHECK-NEXT: store volatile double 1.000000e+00, double* [[P:%.*]], align 8
+; CHECK-NEXT: store volatile double 1.000000e+00, ptr [[P:%.*]], align 8
; CHECK-NEXT: ret void
;
%B = tail call double @llvm.cos.f64(double 0.0) nounwind
- store volatile double %B, double* %P
+ store volatile double %B, ptr %P
ret void
}
-define void @sin(double *%P) {
+define void @sin(ptr %P) {
; CHECK-LABEL: @sin(
-; CHECK-NEXT: store volatile double 0.000000e+00, double* [[P:%.*]], align 8
+; CHECK-NEXT: store volatile double 0.000000e+00, ptr [[P:%.*]], align 8
; CHECK-NEXT: ret void
;
%B = tail call double @llvm.sin.f64(double 0.0) nounwind
- store volatile double %B, double* %P
+ store volatile double %B, ptr %P
ret void
}
-define void @floor(double *%P) {
+define void @floor(ptr %P) {
; CHECK-LABEL: @floor(
-; CHECK-NEXT: store volatile double 1.000000e+00, double* [[P:%.*]], align 8
-; CHECK-NEXT: store volatile double -2.000000e+00, double* [[P]], align 8
+; CHECK-NEXT: store volatile double 1.000000e+00, ptr [[P:%.*]], align 8
+; CHECK-NEXT: store volatile double -2.000000e+00, ptr [[P]], align 8
; CHECK-NEXT: ret void
;
%B = tail call double @llvm.floor.f64(double 1.5) nounwind
- store volatile double %B, double* %P
+ store volatile double %B, ptr %P
%C = tail call double @llvm.floor.f64(double -1.5) nounwind
- store volatile double %C, double* %P
+ store volatile double %C, ptr %P
ret void
}
-define void @ceil(double *%P) {
+define void @ceil(ptr %P) {
; CHECK-LABEL: @ceil(
-; CHECK-NEXT: store volatile double 2.000000e+00, double* [[P:%.*]], align 8
-; CHECK-NEXT: store volatile double -1.000000e+00, double* [[P]], align 8
+; CHECK-NEXT: store volatile double 2.000000e+00, ptr [[P:%.*]], align 8
+; CHECK-NEXT: store volatile double -1.000000e+00, ptr [[P]], align 8
; CHECK-NEXT: ret void
;
%B = tail call double @llvm.ceil.f64(double 1.5) nounwind
- store volatile double %B, double* %P
+ store volatile double %B, ptr %P
%C = tail call double @llvm.ceil.f64(double -1.5) nounwind
- store volatile double %C, double* %P
+ store volatile double %C, ptr %P
ret void
}
-define void @trunc(double *%P) {
+define void @trunc(ptr %P) {
; CHECK-LABEL: @trunc(
-; CHECK-NEXT: store volatile double 1.000000e+00, double* [[P:%.*]], align 8
-; CHECK-NEXT: store volatile double -1.000000e+00, double* [[P]], align 8
+; CHECK-NEXT: store volatile double 1.000000e+00, ptr [[P:%.*]], align 8
+; CHECK-NEXT: store volatile double -1.000000e+00, ptr [[P]], align 8
; CHECK-NEXT: ret void
;
%B = tail call double @llvm.trunc.f64(double 1.5) nounwind
- store volatile double %B, double* %P
+ store volatile double %B, ptr %P
%C = tail call double @llvm.trunc.f64(double -1.5) nounwind
- store volatile double %C, double* %P
+ store volatile double %C, ptr %P
ret void
}
-define void @rint(double *%P) {
+define void @rint(ptr %P) {
; CHECK-LABEL: @rint(
-; CHECK-NEXT: store volatile double 2.000000e+00, double* [[P:%.*]], align 8
-; CHECK-NEXT: store volatile double -2.000000e+00, double* [[P]], align 8
+; CHECK-NEXT: store volatile double 2.000000e+00, ptr [[P:%.*]], align 8
+; CHECK-NEXT: store volatile double -2.000000e+00, ptr [[P]], align 8
; CHECK-NEXT: ret void
;
%B = tail call double @llvm.rint.f64(double 1.5) nounwind
- store volatile double %B, double* %P
+ store volatile double %B, ptr %P
%C = tail call double @llvm.rint.f64(double -1.5) nounwind
- store volatile double %C, double* %P
+ store volatile double %C, ptr %P
ret void
}
-define void @nearbyint(double *%P) {
+define void @nearbyint(ptr %P) {
; CHECK-LABEL: @nearbyint(
-; CHECK-NEXT: store volatile double 2.000000e+00, double* [[P:%.*]], align 8
-; CHECK-NEXT: store volatile double -2.000000e+00, double* [[P]], align 8
+; CHECK-NEXT: store volatile double 2.000000e+00, ptr [[P:%.*]], align 8
+; CHECK-NEXT: store volatile double -2.000000e+00, ptr [[P]], align 8
; CHECK-NEXT: ret void
;
%B = tail call double @llvm.nearbyint.f64(double 1.5) nounwind
- store volatile double %B, double* %P
+ store volatile double %B, ptr %P
%C = tail call double @llvm.nearbyint.f64(double -1.5) nounwind
- store volatile double %C, double* %P
+ store volatile double %C, ptr %P
ret void
}
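; Editorial note: every test above relies on compile-time constant folding of
; the rounding intrinsics, and the negative operand is the interesting case:
;   floor(-1.5)          --> -2.0  (rounds toward -infinity)
;   ceil(-1.5)           --> -1.0  (rounds toward +infinity)
;   trunc(-1.5)          --> -1.0  (rounds toward zero)
;   rint/nearbyint(-1.5) --> -2.0  (rounds to nearest even)
; so the CHECK lines match the folded store value, never the call itself.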
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -instcombine -early-cse -earlycse-debug-hash -S < %s | FileCheck %s
-define i8* @simplifyNullLaunder() {
+define ptr @simplifyNullLaunder() {
; CHECK-LABEL: @simplifyNullLaunder(
-; CHECK-NEXT: ret i8* null
+; CHECK-NEXT: ret ptr null
;
- %b2 = call i8* @llvm.launder.invariant.group.p0i8(i8* null)
- ret i8* %b2
+ %b2 = call ptr @llvm.launder.invariant.group.p0(ptr null)
+ ret ptr %b2
}
-define i8* @dontSimplifyNullLaunderNoNullOpt() #0 {
+define ptr @dontSimplifyNullLaunderNoNullOpt() #0 {
; CHECK-LABEL: @dontSimplifyNullLaunderNoNullOpt(
-; CHECK-NEXT: [[B2:%.*]] = call i8* @llvm.launder.invariant.group.p0i8(i8* null)
-; CHECK-NEXT: ret i8* [[B2]]
+; CHECK-NEXT: [[B2:%.*]] = call ptr @llvm.launder.invariant.group.p0(ptr null)
+; CHECK-NEXT: ret ptr [[B2]]
;
- %b2 = call i8* @llvm.launder.invariant.group.p0i8(i8* null)
- ret i8* %b2
+ %b2 = call ptr @llvm.launder.invariant.group.p0(ptr null)
+ ret ptr %b2
}
-define i8 addrspace(42)* @dontsimplifyNullLaunderForDifferentAddrspace() {
+define ptr addrspace(42) @dontsimplifyNullLaunderForDifferentAddrspace() {
; CHECK-LABEL: @dontsimplifyNullLaunderForDifferentAddrspace(
-; CHECK-NEXT: [[B2:%.*]] = call i8 addrspace(42)* @llvm.launder.invariant.group.p42i8(i8 addrspace(42)* null)
-; CHECK-NEXT: ret i8 addrspace(42)* [[B2]]
+; CHECK-NEXT: [[B2:%.*]] = call ptr addrspace(42) @llvm.launder.invariant.group.p42(ptr addrspace(42) null)
+; CHECK-NEXT: ret ptr addrspace(42) [[B2]]
;
- %b2 = call i8 addrspace(42)* @llvm.launder.invariant.group.p42i8(i8 addrspace(42)* null)
- ret i8 addrspace(42)* %b2
+ %b2 = call ptr addrspace(42) @llvm.launder.invariant.group.p42(ptr addrspace(42) null)
+ ret ptr addrspace(42) %b2
}
-define i8* @simplifyUndefLaunder() {
+define ptr @simplifyUndefLaunder() {
; CHECK-LABEL: @simplifyUndefLaunder(
-; CHECK-NEXT: ret i8* undef
+; CHECK-NEXT: ret ptr undef
;
- %b2 = call i8* @llvm.launder.invariant.group.p0i8(i8* undef)
- ret i8* %b2
+ %b2 = call ptr @llvm.launder.invariant.group.p0(ptr undef)
+ ret ptr %b2
}
-define i8 addrspace(42)* @simplifyUndefLaunder2() {
+define ptr addrspace(42) @simplifyUndefLaunder2() {
; CHECK-LABEL: @simplifyUndefLaunder2(
-; CHECK-NEXT: ret i8 addrspace(42)* undef
+; CHECK-NEXT: ret ptr addrspace(42) undef
;
- %b2 = call i8 addrspace(42)* @llvm.launder.invariant.group.p42i8(i8 addrspace(42)* undef)
- ret i8 addrspace(42)* %b2
+ %b2 = call ptr addrspace(42) @llvm.launder.invariant.group.p42(ptr addrspace(42) undef)
+ ret ptr addrspace(42) %b2
}
-define i8* @simplifyNullStrip() {
+define ptr @simplifyNullStrip() {
; CHECK-LABEL: @simplifyNullStrip(
-; CHECK-NEXT: ret i8* null
+; CHECK-NEXT: ret ptr null
;
- %b2 = call i8* @llvm.strip.invariant.group.p0i8(i8* null)
- ret i8* %b2
+ %b2 = call ptr @llvm.strip.invariant.group.p0(ptr null)
+ ret ptr %b2
}
-define i8* @dontSimplifyNullStripNonNullOpt() #0 {
+define ptr @dontSimplifyNullStripNonNullOpt() #0 {
; CHECK-LABEL: @dontSimplifyNullStripNonNullOpt(
-; CHECK-NEXT: [[B2:%.*]] = call i8* @llvm.strip.invariant.group.p0i8(i8* null)
-; CHECK-NEXT: ret i8* [[B2]]
+; CHECK-NEXT: [[B2:%.*]] = call ptr @llvm.strip.invariant.group.p0(ptr null)
+; CHECK-NEXT: ret ptr [[B2]]
;
- %b2 = call i8* @llvm.strip.invariant.group.p0i8(i8* null)
- ret i8* %b2
+ %b2 = call ptr @llvm.strip.invariant.group.p0(ptr null)
+ ret ptr %b2
}
-define i8 addrspace(42)* @dontsimplifyNullStripForDifferentAddrspace() {
+define ptr addrspace(42) @dontsimplifyNullStripForDifferentAddrspace() {
; CHECK-LABEL: @dontsimplifyNullStripForDifferentAddrspace(
-; CHECK-NEXT: [[B2:%.*]] = call i8 addrspace(42)* @llvm.strip.invariant.group.p42i8(i8 addrspace(42)* null)
-; CHECK-NEXT: ret i8 addrspace(42)* [[B2]]
+; CHECK-NEXT: [[B2:%.*]] = call ptr addrspace(42) @llvm.strip.invariant.group.p42(ptr addrspace(42) null)
+; CHECK-NEXT: ret ptr addrspace(42) [[B2]]
;
- %b2 = call i8 addrspace(42)* @llvm.strip.invariant.group.p42i8(i8 addrspace(42)* null)
- ret i8 addrspace(42)* %b2
+ %b2 = call ptr addrspace(42) @llvm.strip.invariant.group.p42(ptr addrspace(42) null)
+ ret ptr addrspace(42) %b2
}
-define i8* @simplifyUndefStrip() {
+define ptr @simplifyUndefStrip() {
; CHECK-LABEL: @simplifyUndefStrip(
-; CHECK-NEXT: ret i8* undef
+; CHECK-NEXT: ret ptr undef
;
- %b2 = call i8* @llvm.strip.invariant.group.p0i8(i8* undef)
- ret i8* %b2
+ %b2 = call ptr @llvm.strip.invariant.group.p0(ptr undef)
+ ret ptr %b2
}
-define i8 addrspace(42)* @simplifyUndefStrip2() {
+define ptr addrspace(42) @simplifyUndefStrip2() {
; CHECK-LABEL: @simplifyUndefStrip2(
-; CHECK-NEXT: ret i8 addrspace(42)* undef
+; CHECK-NEXT: ret ptr addrspace(42) undef
;
- %b2 = call i8 addrspace(42)* @llvm.strip.invariant.group.p42i8(i8 addrspace(42)* undef)
- ret i8 addrspace(42)* %b2
+ %b2 = call ptr addrspace(42) @llvm.strip.invariant.group.p42(ptr addrspace(42) undef)
+ ret ptr addrspace(42) %b2
}
-define i8* @simplifyLaunderOfLaunder(i8* %a) {
+define ptr @simplifyLaunderOfLaunder(ptr %a) {
; CHECK-LABEL: @simplifyLaunderOfLaunder(
-; CHECK-NEXT: [[TMP1:%.*]] = call i8* @llvm.launder.invariant.group.p0i8(i8* [[A:%.*]])
-; CHECK-NEXT: ret i8* [[TMP1]]
+; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.launder.invariant.group.p0(ptr [[A:%.*]])
+; CHECK-NEXT: ret ptr [[TMP1]]
;
- %a2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %a)
- %a3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %a2)
- ret i8* %a3
+ %a2 = call ptr @llvm.launder.invariant.group.p0(ptr %a)
+ %a3 = call ptr @llvm.launder.invariant.group.p0(ptr %a2)
+ ret ptr %a3
}
-define i8* @simplifyStripOfLaunder(i8* %a) {
+define ptr @simplifyStripOfLaunder(ptr %a) {
; CHECK-LABEL: @simplifyStripOfLaunder(
-; CHECK-NEXT: [[TMP1:%.*]] = call i8* @llvm.strip.invariant.group.p0i8(i8* [[A:%.*]])
-; CHECK-NEXT: ret i8* [[TMP1]]
+; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.strip.invariant.group.p0(ptr [[A:%.*]])
+; CHECK-NEXT: ret ptr [[TMP1]]
;
- %a2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %a)
- %a3 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a2)
- ret i8* %a3
+ %a2 = call ptr @llvm.launder.invariant.group.p0(ptr %a)
+ %a3 = call ptr @llvm.strip.invariant.group.p0(ptr %a2)
+ ret ptr %a3
}
-define i1 @simplifyForCompare(i8* %a) {
+define i1 @simplifyForCompare(ptr %a) {
; CHECK-LABEL: @simplifyForCompare(
-; CHECK-NEXT: [[TMP1:%.*]] = call i8* @llvm.strip.invariant.group.p0i8(i8* [[A:%.*]])
+; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.strip.invariant.group.p0(ptr [[A:%.*]])
; CHECK-NEXT: ret i1 true
;
- %a2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %a)
+ %a2 = call ptr @llvm.launder.invariant.group.p0(ptr %a)
- %a3 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a2)
- %b2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
- %c = icmp eq i8* %a3, %b2
+ %a3 = call ptr @llvm.strip.invariant.group.p0(ptr %a2)
+ %b2 = call ptr @llvm.strip.invariant.group.p0(ptr %a)
+ %c = icmp eq ptr %a3, %b2
ret i1 %c
}
-define i16* @skipWithDifferentTypes(i8* %a) {
+define ptr @skipWithDifferentTypes(ptr %a) {
; CHECK-LABEL: @skipWithDifferentTypes(
-; CHECK-NEXT: [[TMP1:%.*]] = call i8* @llvm.strip.invariant.group.p0i8(i8* [[A:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i16*
-; CHECK-NEXT: ret i16* [[TMP2]]
+; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.strip.invariant.group.p0(ptr [[A:%.*]])
+; CHECK-NEXT: ret ptr [[TMP1]]
;
- %a2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %a)
- %c1 = bitcast i8* %a2 to i16*
+ %a2 = call ptr @llvm.launder.invariant.group.p0(ptr %a)
- %a3 = call i16* @llvm.strip.invariant.group.p0i16(i16* %c1)
- ret i16* %a3
+ %a3 = call ptr @llvm.strip.invariant.group.p0(ptr %a2)
+ ret ptr %a3
}
-define i16 addrspace(42)* @skipWithDifferentTypesAddrspace(i8 addrspace(42)* %a) {
+define ptr addrspace(42) @skipWithDifferentTypesAddrspace(ptr addrspace(42) %a) {
; CHECK-LABEL: @skipWithDifferentTypesAddrspace(
-; CHECK-NEXT: [[TMP1:%.*]] = call i8 addrspace(42)* @llvm.strip.invariant.group.p42i8(i8 addrspace(42)* [[A:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 addrspace(42)* [[TMP1]] to i16 addrspace(42)*
-; CHECK-NEXT: ret i16 addrspace(42)* [[TMP2]]
+; CHECK-NEXT: [[TMP1:%.*]] = call ptr addrspace(42) @llvm.strip.invariant.group.p42(ptr addrspace(42) [[A:%.*]])
+; CHECK-NEXT: ret ptr addrspace(42) [[TMP1]]
;
- %a2 = call i8 addrspace(42)* @llvm.launder.invariant.group.p42i8(i8 addrspace(42)* %a)
- %c1 = bitcast i8 addrspace(42)* %a2 to i16 addrspace(42)*
+ %a2 = call ptr addrspace(42) @llvm.launder.invariant.group.p42(ptr addrspace(42) %a)
- %a3 = call i16 addrspace(42)* @llvm.strip.invariant.group.p42i16(i16 addrspace(42)* %c1)
- ret i16 addrspace(42)* %a3
+ %a3 = call ptr addrspace(42) @llvm.strip.invariant.group.p42(ptr addrspace(42) %a2)
+ ret ptr addrspace(42) %a3
}
-define i16 addrspace(42)* @skipWithDifferentTypesDifferentAddrspace(i8* %a) {
+define ptr addrspace(42) @skipWithDifferentTypesDifferentAddrspace(ptr %a) {
; CHECK-LABEL: @skipWithDifferentTypesDifferentAddrspace(
-; CHECK-NEXT: [[TMP1:%.*]] = call i8* @llvm.strip.invariant.group.p0i8(i8* [[A:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i16*
-; CHECK-NEXT: [[TMP3:%.*]] = addrspacecast i16* [[TMP2]] to i16 addrspace(42)*
-; CHECK-NEXT: ret i16 addrspace(42)* [[TMP3]]
+; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.strip.invariant.group.p0(ptr [[A:%.*]])
+; CHECK-NEXT: [[TMP3:%.*]] = addrspacecast ptr [[TMP1]] to ptr addrspace(42)
+; CHECK-NEXT: ret ptr addrspace(42) [[TMP3]]
;
- %cast = addrspacecast i8* %a to i8 addrspace(42)*
- %a2 = call i8 addrspace(42)* @llvm.launder.invariant.group.p42i8(i8 addrspace(42)* %cast)
- %c1 = bitcast i8 addrspace(42)* %a2 to i16 addrspace(42)*
+ %cast = addrspacecast ptr %a to ptr addrspace(42)
+ %a2 = call ptr addrspace(42) @llvm.launder.invariant.group.p42(ptr addrspace(42) %cast)
- %a3 = call i16 addrspace(42)* @llvm.strip.invariant.group.p42i16(i16 addrspace(42)* %c1)
- ret i16 addrspace(42)* %a3
+ %a3 = call ptr addrspace(42) @llvm.strip.invariant.group.p42(ptr addrspace(42) %a2)
+ ret ptr addrspace(42) %a3
}
-define i1 @icmp_null_launder(i8* %a) {
+define i1 @icmp_null_launder(ptr %a) {
; CHECK-LABEL: @icmp_null_launder(
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8* [[A:%.*]], null
+; CHECK-NEXT: [[R:%.*]] = icmp eq ptr [[A:%.*]], null
; CHECK-NEXT: ret i1 [[R]]
;
- %a2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %a)
- %r = icmp eq i8* %a2, null
+ %a2 = call ptr @llvm.launder.invariant.group.p0(ptr %a)
+ %r = icmp eq ptr %a2, null
ret i1 %r
}
-define i1 @icmp_null_strip(i8* %a) {
+define i1 @icmp_null_strip(ptr %a) {
; CHECK-LABEL: @icmp_null_strip(
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8* [[A:%.*]], null
+; CHECK-NEXT: [[R:%.*]] = icmp eq ptr [[A:%.*]], null
; CHECK-NEXT: ret i1 [[R]]
;
- %a2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
- %r = icmp eq i8* %a2, null
+ %a2 = call ptr @llvm.strip.invariant.group.p0(ptr %a)
+ %r = icmp eq ptr %a2, null
ret i1 %r
}
-define i1 @icmp_null_launder_valid_null(i8* %a) #0 {
+define i1 @icmp_null_launder_valid_null(ptr %a) #0 {
; CHECK-LABEL: @icmp_null_launder_valid_null(
-; CHECK-NEXT: [[A2:%.*]] = call i8* @llvm.launder.invariant.group.p0i8(i8* [[A:%.*]])
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8* [[A2]], null
+; CHECK-NEXT: [[A2:%.*]] = call ptr @llvm.launder.invariant.group.p0(ptr [[A:%.*]])
+; CHECK-NEXT: [[R:%.*]] = icmp eq ptr [[A2]], null
; CHECK-NEXT: ret i1 [[R]]
;
- %a2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %a)
- %r = icmp eq i8* %a2, null
+ %a2 = call ptr @llvm.launder.invariant.group.p0(ptr %a)
+ %r = icmp eq ptr %a2, null
ret i1 %r
}
-define i1 @icmp_null_strip_valid_null(i8* %a) #0 {
+define i1 @icmp_null_strip_valid_null(ptr %a) #0 {
; CHECK-LABEL: @icmp_null_strip_valid_null(
-; CHECK-NEXT: [[A2:%.*]] = call i8* @llvm.strip.invariant.group.p0i8(i8* [[A:%.*]])
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8* [[A2]], null
+; CHECK-NEXT: [[A2:%.*]] = call ptr @llvm.strip.invariant.group.p0(ptr [[A:%.*]])
+; CHECK-NEXT: [[R:%.*]] = icmp eq ptr [[A2]], null
; CHECK-NEXT: ret i1 [[R]]
;
- %a2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
- %r = icmp eq i8* %a2, null
+ %a2 = call ptr @llvm.strip.invariant.group.p0(ptr %a)
+ %r = icmp eq ptr %a2, null
ret i1 %r
}
; Check that null always becomes the RHS
-define i1 @icmp_null_launder_lhs(i8* %a) {
+define i1 @icmp_null_launder_lhs(ptr %a) {
; CHECK-LABEL: @icmp_null_launder_lhs(
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8* [[A:%.*]], null
+; CHECK-NEXT: [[R:%.*]] = icmp eq ptr [[A:%.*]], null
; CHECK-NEXT: ret i1 [[R]]
;
- %a2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %a)
- %r = icmp eq i8* null, %a2
+ %a2 = call ptr @llvm.launder.invariant.group.p0(ptr %a)
+ %r = icmp eq ptr null, %a2
ret i1 %r
}
-define i1 @icmp_null_launder_bitcasts(i32* %a) {
+define i1 @icmp_null_launder_bitcasts(ptr %a) {
; CHECK-LABEL: @icmp_null_launder_bitcasts(
-; CHECK-NEXT: [[R:%.*]] = icmp eq i32* [[A:%.*]], null
+; CHECK-NEXT: [[R:%.*]] = icmp eq ptr [[A:%.*]], null
; CHECK-NEXT: ret i1 [[R]]
;
- %a2 = bitcast i32* %a to i8*
- %a3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %a2)
- %a4 = bitcast i8* %a3 to i32*
- %r = icmp eq i32* %a4, null
+ %a3 = call ptr @llvm.launder.invariant.group.p0(ptr %a)
+ %r = icmp eq ptr %a3, null
ret i1 %r
}
-declare i8* @llvm.launder.invariant.group.p0i8(i8*)
-declare i8 addrspace(42)* @llvm.launder.invariant.group.p42i8(i8 addrspace(42)*)
-declare i8* @llvm.strip.invariant.group.p0i8(i8*)
-declare i8 addrspace(42)* @llvm.strip.invariant.group.p42i8(i8 addrspace(42)*)
-declare i16* @llvm.strip.invariant.group.p0i16(i16* %c1)
-declare i16 addrspace(42)* @llvm.strip.invariant.group.p42i16(i16 addrspace(42)* %c1)
+declare ptr @llvm.launder.invariant.group.p0(ptr)
+declare ptr addrspace(42) @llvm.launder.invariant.group.p42(ptr addrspace(42))
+declare ptr @llvm.strip.invariant.group.p0(ptr)
+declare ptr addrspace(42) @llvm.strip.invariant.group.p42(ptr addrspace(42))
attributes #0 = { null_pointer_is_valid }
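; Editorial note (a sketch of the pattern these tests pin down): launder/strip
; of a pointer known to be invalid folds away, e.g.
;   %p = call ptr @llvm.launder.invariant.group.p0(ptr null)  -->  ptr null
; but the fold is blocked whenever null might be dereferenceable: under the
; null_pointer_is_valid attribute, or in a non-zero address space such as
; addrspace(42) above, where null is not known to be unmapped.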
; Test to make sure unused llvm.invariant.start calls are not trivially eliminated
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-declare void @g(i8*)
-declare void @g_addr1(i8 addrspace(1)*)
+declare void @g(ptr)
+declare void @g_addr1(ptr addrspace(1))
-declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) nounwind readonly
-declare {}* @llvm.invariant.start.p1i8(i64, i8 addrspace(1)* nocapture) nounwind readonly
+declare ptr @llvm.invariant.start.p0(i64, ptr nocapture) nounwind readonly
+declare ptr @llvm.invariant.start.p1(i64, ptr addrspace(1) nocapture) nounwind readonly
define i8 @f() {
- %a = alloca i8 ; <i8*> [#uses=4]
- store i8 0, i8* %a
- %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %a) ; <{}*> [#uses=0]
- ; CHECK: call {}* @llvm.invariant.start.p0i8
- call void @g(i8* %a)
- %r = load i8, i8* %a ; <i8> [#uses=1]
+ %a = alloca i8 ; <ptr> [#uses=4]
+ store i8 0, ptr %a
+ %i = call ptr @llvm.invariant.start.p0(i64 1, ptr %a) ; <ptr> [#uses=0]
+ ; CHECK: call ptr @llvm.invariant.start.p0
+ call void @g(ptr %a)
+ %r = load i8, ptr %a ; <i8> [#uses=1]
ret i8 %r
}
; Make sure llvm.invariant.start calls in a non-default addrspace are also not eliminated.
-define i8 @f_addrspace1(i8 addrspace(1)* %a) {
- store i8 0, i8 addrspace(1)* %a
- %i = call {}* @llvm.invariant.start.p1i8(i64 1, i8 addrspace(1)* %a) ; <{}*> [#uses=0]
- ; CHECK: call {}* @llvm.invariant.start.p1i8
- call void @g_addr1(i8 addrspace(1)* %a)
- %r = load i8, i8 addrspace(1)* %a ; <i8> [#uses=1]
+define i8 @f_addrspace1(ptr addrspace(1) %a) {
+ store i8 0, ptr addrspace(1) %a
+ %i = call ptr @llvm.invariant.start.p1(i64 1, ptr addrspace(1) %a) ; <ptr> [#uses=0]
+ ; CHECK: call ptr @llvm.invariant.start.p1
+ call void @g_addr1(ptr addrspace(1) %a)
+ %r = load i8, ptr addrspace(1) %a ; <i8> [#uses=1]
ret i8 %r
}
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
declare i32 @__gxx_personality_v0(...)
-declare void @__cxa_call_unexpected(i8*)
-declare i64 @llvm.objectsize.i64(i8*, i1) nounwind readonly
-declare i8* @_Znwm(i64)
+declare void @__cxa_call_unexpected(ptr)
+declare i64 @llvm.objectsize.i64(ptr, i1) nounwind readonly
+declare ptr @_Znwm(i64)
; CHECK-LABEL: @f1(
-define i64 @f1() nounwind uwtable ssp personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i64 @f1() nounwind uwtable ssp personality ptr @__gxx_personality_v0 {
entry:
-; CHECK: invoke noalias i8* undef()
- %call = invoke noalias i8* undef()
+; CHECK: invoke noalias ptr undef()
+ %call = invoke noalias ptr undef()
to label %invoke.cont unwind label %lpad
invoke.cont:
; CHECK: ret i64 0
- %0 = tail call i64 @llvm.objectsize.i64(i8* %call, i1 false)
+ %0 = tail call i64 @llvm.objectsize.i64(ptr %call, i1 false)
ret i64 %0
lpad:
- %1 = landingpad { i8*, i32 }
- filter [0 x i8*] zeroinitializer
- %2 = extractvalue { i8*, i32 } %1, 0
- tail call void @__cxa_call_unexpected(i8* %2) noreturn nounwind
+ %1 = landingpad { ptr, i32 }
+ filter [0 x ptr] zeroinitializer
+ %2 = extractvalue { ptr, i32 } %1, 0
+ tail call void @__cxa_call_unexpected(ptr %2) noreturn nounwind
unreachable
}
; CHECK-LABEL: @f2(
-define i64 @f2() nounwind uwtable ssp personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i64 @f2() nounwind uwtable ssp personality ptr @__gxx_personality_v0 {
entry:
-; CHECK: invoke noalias i8* null()
- %call = invoke noalias i8* null()
+; CHECK: invoke noalias ptr null()
+ %call = invoke noalias ptr null()
to label %invoke.cont unwind label %lpad
invoke.cont:
; CHECK: ret i64 0
- %0 = tail call i64 @llvm.objectsize.i64(i8* %call, i1 false)
+ %0 = tail call i64 @llvm.objectsize.i64(ptr %call, i1 false)
ret i64 %0
lpad:
- %1 = landingpad { i8*, i32 }
- filter [0 x i8*] zeroinitializer
- %2 = extractvalue { i8*, i32 } %1, 0
- tail call void @__cxa_call_unexpected(i8* %2) noreturn nounwind
+ %1 = landingpad { ptr, i32 }
+ filter [0 x ptr] zeroinitializer
+ %2 = extractvalue { ptr, i32 } %1, 0
+ tail call void @__cxa_call_unexpected(ptr %2) noreturn nounwind
unreachable
}
; CHECK-LABEL: @f2_no_null_opt(
-define i64 @f2_no_null_opt() nounwind uwtable ssp #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i64 @f2_no_null_opt() nounwind uwtable ssp #0 personality ptr @__gxx_personality_v0 {
entry:
-; CHECK: invoke noalias i8* null()
- %call = invoke noalias i8* null()
+; CHECK: invoke noalias ptr null()
+ %call = invoke noalias ptr null()
to label %invoke.cont unwind label %lpad
invoke.cont:
-; CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %call, i1 false, i1 false, i1 false)
- %0 = tail call i64 @llvm.objectsize.i64(i8* %call, i1 false)
+; CHECK: call i64 @llvm.objectsize.i64.p0(ptr %call, i1 false, i1 false, i1 false)
+ %0 = tail call i64 @llvm.objectsize.i64(ptr %call, i1 false)
ret i64 %0
lpad:
- %1 = landingpad { i8*, i32 }
- filter [0 x i8*] zeroinitializer
- %2 = extractvalue { i8*, i32 } %1, 0
- tail call void @__cxa_call_unexpected(i8* %2) noreturn nounwind
+ %1 = landingpad { ptr, i32 }
+ filter [0 x ptr] zeroinitializer
+ %2 = extractvalue { ptr, i32 } %1, 0
+ tail call void @__cxa_call_unexpected(ptr %2) noreturn nounwind
unreachable
}
attributes #0 = { null_pointer_is_valid }
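; Editorial note: in @f1/@f2 the noalias invoke returns undef/null, so
; @llvm.objectsize folds to 0 and each function returns a constant; under
; null_pointer_is_valid (@f2_no_null_opt) that fold would be unsound, so the
; call is only canonicalized to the full four-argument form
; @llvm.objectsize.i64.p0(ptr, i1, i1, i1) and left in place.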
; CHECK-LABEL: @f3(
-define void @f3() nounwind uwtable ssp personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @f3() nounwind uwtable ssp personality ptr @__gxx_personality_v0 {
; CHECK: invoke void @llvm.donothing()
- %call = invoke noalias i8* @_Znwm(i64 13)
+ %call = invoke noalias ptr @_Znwm(i64 13)
to label %invoke.cont unwind label %lpad
invoke.cont:
ret void
lpad:
- %1 = landingpad { i8*, i32 }
- filter [0 x i8*] zeroinitializer
- %2 = extractvalue { i8*, i32 } %1, 0
- tail call void @__cxa_call_unexpected(i8* %2) noreturn nounwind
+ %1 = landingpad { ptr, i32 }
+ filter [0 x ptr] zeroinitializer
+ %2 = extractvalue { ptr, i32 } %1, 0
+ tail call void @__cxa_call_unexpected(ptr %2) noreturn nounwind
unreachable
}
}
; Test that exposed a bug in the PHI handling after D60846. No folding should happen here!
-define void @D60846_miscompile(i1* %p) {
+define void @D60846_miscompile(ptr %p) {
; CHECK-LABEL: @D60846_miscompile(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK-NEXT: br i1 [[IS_ZERO]], label [[COMMON]], label [[NON_ZERO:%.*]]
; CHECK: non_zero:
; CHECK-NEXT: [[IS_ONE:%.*]] = icmp eq i16 [[I]], 1
-; CHECK-NEXT: store i1 [[IS_ONE]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[IS_ONE]], ptr [[P:%.*]], align 1
; CHECK-NEXT: br label [[COMMON]]
; CHECK: common:
; CHECK-NEXT: [[I_INC]] = add i16 [[I]], 1
non_zero: ; preds = %loop
%is_one = icmp eq i16 %i, 1
- store i1 %is_one, i1* %p
+ store i1 %is_one, ptr %p
br label %common
common: ; preds = %non_zero, %loop
; END.
%struct.p = type <{ i8, i32 }>
-@t = global %struct.p <{ i8 1, i32 10 }> ; <%struct.p*> [#uses=1]
-@u = weak global %struct.p zeroinitializer ; <%struct.p*> [#uses=1]
+@t = global %struct.p <{ i8 1, i32 10 }> ; <ptr> [#uses=1]
+@u = weak global %struct.p zeroinitializer ; <ptr> [#uses=1]
define i32 @main() {
entry:
- %retval = alloca i32, align 4 ; <i32*> [#uses=2]
- %tmp = alloca i32, align 4 ; <i32*> [#uses=2]
- %tmp1 = alloca i32, align 4 ; <i32*> [#uses=3]
+ %retval = alloca i32, align 4 ; <ptr> [#uses=2]
+ %tmp = alloca i32, align 4 ; <ptr> [#uses=2]
+ %tmp1 = alloca i32, align 4 ; <ptr> [#uses=3]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp3 = load i32, i32* getelementptr (%struct.p, %struct.p* @t, i32 0, i32 1), align 1 ; <i32> [#uses=1]
- store i32 %tmp3, i32* %tmp1, align 4
- %tmp5 = load i32, i32* %tmp1, align 4 ; <i32> [#uses=1]
- store i32 %tmp5, i32* getelementptr (%struct.p, %struct.p* @u, i32 0, i32 1), align 1
- %tmp6 = load i32, i32* %tmp1, align 4 ; <i32> [#uses=1]
- store i32 %tmp6, i32* %tmp, align 4
- %tmp7 = load i32, i32* %tmp, align 4 ; <i32> [#uses=1]
- store i32 %tmp7, i32* %retval, align 4
+ %tmp3 = load i32, ptr getelementptr (%struct.p, ptr @t, i32 0, i32 1), align 1 ; <i32> [#uses=1]
+ store i32 %tmp3, ptr %tmp1, align 4
+ %tmp5 = load i32, ptr %tmp1, align 4 ; <i32> [#uses=1]
+ store i32 %tmp5, ptr getelementptr (%struct.p, ptr @u, i32 0, i32 1), align 1
+ %tmp6 = load i32, ptr %tmp1, align 4 ; <i32> [#uses=1]
+ store i32 %tmp6, ptr %tmp, align 4
+ %tmp7 = load i32, ptr %tmp, align 4 ; <i32> [#uses=1]
+ store i32 %tmp7, ptr %retval, align 4
br label %return
return: ; preds = %entry
- %retval8 = load i32, i32* %retval ; <i32> [#uses=1]
+ %retval8 = load i32, ptr %retval ; <i32> [#uses=1]
ret i32 %retval8
}
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
-declare void @foo(i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @foo(ptr nocapture)
define void @asan() sanitize_address {
entry:
; CHECK-LABEL: @asan(
%text = alloca i8, align 1
- call void @llvm.lifetime.start.p0i8(i64 1, i8* %text)
- call void @llvm.lifetime.end.p0i8(i64 1, i8* %text)
+ call void @llvm.lifetime.start.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.end.p0(i64 1, ptr %text)
; CHECK: call void @llvm.lifetime.start
; CHECK-NEXT: call void @llvm.lifetime.end
- call void @foo(i8* %text) ; Keep alloca alive
+ call void @foo(ptr %text) ; Keep alloca alive
ret void
}
; CHECK-LABEL: @hwasan(
%text = alloca i8, align 1
- call void @llvm.lifetime.start.p0i8(i64 1, i8* %text)
- call void @llvm.lifetime.end.p0i8(i64 1, i8* %text)
+ call void @llvm.lifetime.start.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.end.p0(i64 1, ptr %text)
; CHECK: call void @llvm.lifetime.start
; CHECK-NEXT: call void @llvm.lifetime.end
- call void @foo(i8* %text) ; Keep alloca alive
+ call void @foo(ptr %text) ; Keep alloca alive
ret void
}
; CHECK-LABEL: @msan(
%text = alloca i8, align 1
- call void @llvm.lifetime.start.p0i8(i64 1, i8* %text)
- call void @llvm.lifetime.end.p0i8(i64 1, i8* %text)
+ call void @llvm.lifetime.start.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.end.p0(i64 1, ptr %text)
; CHECK: call void @llvm.lifetime.start
; CHECK-NEXT: call void @llvm.lifetime.end
- call void @foo(i8* %text) ; Keep alloca alive
+ call void @foo(ptr %text) ; Keep alloca alive
ret void
}
; CHECK-LABEL: @no_asan(
%text = alloca i8, align 1
- call void @llvm.lifetime.start.p0i8(i64 1, i8* %text)
- call void @llvm.lifetime.end.p0i8(i64 1, i8* %text)
+ call void @llvm.lifetime.start.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.end.p0(i64 1, ptr %text)
; CHECK-NOT: call void @llvm.lifetime
- call void @foo(i8* %text) ; Keep alloca alive
+ call void @foo(ptr %text) ; Keep alloca alive
ret void
}
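; Editorial note: an empty lifetime.start/lifetime.end pair on an alloca is
; normally deleted as dead, but when the function is compiled with
; sanitize_address, sanitize_hwaddress, or sanitize_memory the markers are
; kept, since the sanitizers use them to poison and unpoison the stack slot;
; only @no_asan expects the pair to disappear.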
; RUN: opt < %s -instcombine -instcombine-infinite-loop-threshold=2 -S | FileCheck %s
declare void @llvm.dbg.declare(metadata, metadata, metadata)
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
-declare void @foo(i8* nocapture, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @foo(ptr nocapture, ptr nocapture)
define void @bar(i1 %flag) !dbg !4 {
; CHECK-LABEL: @bar(
; CHECK: bb2:
; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb3:
-; CHECK-NEXT: call void @llvm.dbg.declare(metadata [1 x i8]* [[TEXT]], metadata [[META16:![0-9]+]], metadata !DIExpression()), !dbg [[DBG24:![0-9]+]]
+; CHECK-NEXT: call void @llvm.dbg.declare(metadata ptr [[TEXT]], metadata [[META16:![0-9]+]], metadata !DIExpression()), !dbg [[DBG24:![0-9]+]]
; CHECK-NEXT: br label [[FIN:%.*]]
; CHECK: else:
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8], [1 x i8]* [[TEXT]], i64 0, i64 0
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x i8], [1 x i8]* [[BUFF]], i64 0, i64 0
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull [[TMP0]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull [[TMP1]])
-; CHECK-NEXT: call void @foo(i8* nonnull [[TMP1]], i8* nonnull [[TMP0]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull [[TMP1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull [[TMP0]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[TEXT]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[BUFF]])
+; CHECK-NEXT: call void @foo(ptr nonnull [[BUFF]], ptr nonnull [[TEXT]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[BUFF]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[TEXT]])
; CHECK-NEXT: br label [[FIN]]
; CHECK: fin:
; CHECK-NEXT: ret void
entry:
%text = alloca [1 x i8], align 1
%buff = alloca [1 x i8], align 1
- %0 = getelementptr inbounds [1 x i8], [1 x i8]* %text, i64 0, i64 0
- %1 = getelementptr inbounds [1 x i8], [1 x i8]* %buff, i64 0, i64 0
br i1 %flag, label %if, label %else
if:
- call void @llvm.lifetime.start.p0i8(i64 1, i8* %0)
- call void @llvm.lifetime.start.p0i8(i64 1, i8* %1)
- call void @llvm.lifetime.end.p0i8(i64 1, i8* %1)
- call void @llvm.lifetime.end.p0i8(i64 1, i8* %0)
+ call void @llvm.lifetime.start.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
+ call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
+ call void @llvm.lifetime.end.p0(i64 1, ptr %text)
br label %bb2
bb2:
- call void @llvm.lifetime.start.p0i8(i64 1, i8* %0)
- call void @llvm.lifetime.start.p0i8(i64 1, i8* %1)
- call void @llvm.lifetime.end.p0i8(i64 1, i8* %0)
- call void @llvm.lifetime.end.p0i8(i64 1, i8* %1)
+ call void @llvm.lifetime.start.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
+ call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
br label %bb3
bb3:
- call void @llvm.lifetime.start.p0i8(i64 1, i8* %0)
- call void @llvm.dbg.declare(metadata [1 x i8]* %text, metadata !14, metadata !25), !dbg !26
- call void @llvm.lifetime.end.p0i8(i64 1, i8* %0)
+ call void @llvm.lifetime.start.p0(i64 1, ptr %text)
+ call void @llvm.dbg.declare(metadata ptr %text, metadata !14, metadata !25), !dbg !26
+ call void @llvm.lifetime.end.p0(i64 1, ptr %text)
br label %fin
else:
- call void @llvm.lifetime.start.p0i8(i64 1, i8* %0)
- call void @llvm.lifetime.start.p0i8(i64 1, i8* %1)
- call void @foo(i8* %1, i8* %0)
- call void @llvm.lifetime.end.p0i8(i64 1, i8* %1)
- call void @llvm.lifetime.end.p0i8(i64 1, i8* %0)
+ call void @llvm.lifetime.start.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
+ call void @foo(ptr %buff, ptr %text)
+ call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
+ call void @llvm.lifetime.end.p0(i64 1, ptr %text)
br label %fin
fin:
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[I_0]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x float], [1000 x float]* @a, i64 0, i64 [[TMP0]]
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [1000 x float], [1000 x float]* @b, i64 0, i64 [[TMP0]]
-; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x float], ptr @a, i64 0, i64 [[TMP0]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [1000 x float], ptr @b, i64 0, i64 [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast olt float [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[CMP_I]], float [[TMP2]], float [[TMP1]]
-; CHECK-NEXT: store float [[TMP3]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT: store float [[TMP3]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_0]], 1
; CHECK-NEXT: br label [[FOR_COND]]
;
for.body: ; preds = %for.cond
%0 = zext i32 %i.0 to i64
- %arrayidx = getelementptr inbounds [1000 x float], [1000 x float]* @a, i64 0, i64 %0
- %arrayidx2 = getelementptr inbounds [1000 x float], [1000 x float]* @b, i64 0, i64 %0
- %1 = load float, float* %arrayidx, align 4
- %2 = load float, float* %arrayidx2, align 4
+ %arrayidx = getelementptr inbounds [1000 x float], ptr @a, i64 0, i64 %0
+ %arrayidx2 = getelementptr inbounds [1000 x float], ptr @b, i64 0, i64 %0
+ %1 = load float, ptr %arrayidx, align 4
+ %2 = load float, ptr %arrayidx2, align 4
%cmp.i = fcmp fast olt float %1, %2
- %__b.__a.i = select i1 %cmp.i, float* %arrayidx2, float* %arrayidx
- %3 = bitcast float* %__b.__a.i to i32*
- %4 = load i32, i32* %3, align 4
- %5 = bitcast float* %arrayidx to i32*
- store i32 %4, i32* %5, align 4
+ %__b.__a.i = select i1 %cmp.i, ptr %arrayidx2, ptr %arrayidx
+ %3 = load i32, ptr %__b.__a.i, align 4
+ store i32 %3, ptr %arrayidx, align 4
%inc = add nuw nsw i32 %i.0, 1
br label %for.cond
}
-define i32 @store_bitcasted_load(i1 %cond, float* dereferenceable(4) %addr1, float* dereferenceable(4) %addr2) {
+define i32 @store_bitcasted_load(i1 %cond, ptr dereferenceable(4) %addr1, ptr dereferenceable(4) %addr2) {
; CHECK-LABEL: @store_bitcasted_load(
-; CHECK-NEXT: [[SEL:%.*]] = select i1 [[COND:%.*]], float* [[ADDR1:%.*]], float* [[ADDR2:%.*]]
-; CHECK-NEXT: [[BC1:%.*]] = bitcast float* [[SEL]] to i32*
-; CHECK-NEXT: [[LD:%.*]] = load i32, i32* [[BC1]], align 4
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[COND:%.*]], ptr [[ADDR1:%.*]], ptr [[ADDR2:%.*]]
+; CHECK-NEXT: [[LD:%.*]] = load i32, ptr [[SEL]], align 4
; CHECK-NEXT: ret i32 [[LD]]
;
- %sel = select i1 %cond, float* %addr1, float* %addr2
- %bc1 = bitcast float* %sel to i32*
- %ld = load i32, i32* %bc1
+ %sel = select i1 %cond, ptr %addr1, ptr %addr2
+ %ld = load i32, ptr %sel
ret i32 %ld
}
-define void @bitcasted_store(i1 %cond, float* %loadaddr1, float* %loadaddr2, float* %storeaddr) {
+define void @bitcasted_store(i1 %cond, ptr %loadaddr1, ptr %loadaddr2, ptr %storeaddr) {
; CHECK-LABEL: @bitcasted_store(
-; CHECK-NEXT: [[SEL:%.*]] = select i1 [[COND:%.*]], float* [[LOADADDR1:%.*]], float* [[LOADADDR2:%.*]]
-; CHECK-NEXT: [[INT_LOAD_ADDR:%.*]] = bitcast float* [[SEL]] to i32*
-; CHECK-NEXT: [[LD:%.*]] = load i32, i32* [[INT_LOAD_ADDR]], align 4
-; CHECK-NEXT: [[INT_STORE_ADDR:%.*]] = bitcast float* [[STOREADDR:%.*]] to i32*
-; CHECK-NEXT: store i32 [[LD]], i32* [[INT_STORE_ADDR]], align 4
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[COND:%.*]], ptr [[LOADADDR1:%.*]], ptr [[LOADADDR2:%.*]]
+; CHECK-NEXT: [[LD:%.*]] = load i32, ptr [[SEL]], align 4
+; CHECK-NEXT: store i32 [[LD]], ptr [[STOREADDR:%.*]], align 4
; CHECK-NEXT: ret void
;
- %sel = select i1 %cond, float* %loadaddr1, float* %loadaddr2
- %int_load_addr = bitcast float* %sel to i32*
- %ld = load i32, i32* %int_load_addr
- %int_store_addr = bitcast float* %storeaddr to i32*
- store i32 %ld, i32* %int_store_addr
+ %sel = select i1 %cond, ptr %loadaddr1, ptr %loadaddr2
+ %ld = load i32, ptr %sel
+ store i32 %ld, ptr %storeaddr
ret void
}
-define void @bitcasted_minmax_with_select_of_pointers(float* %loadaddr1, float* %loadaddr2, float* %storeaddr) {
+define void @bitcasted_minmax_with_select_of_pointers(ptr %loadaddr1, ptr %loadaddr2, ptr %storeaddr) {
; CHECK-LABEL: @bitcasted_minmax_with_select_of_pointers(
-; CHECK-NEXT: [[LD1:%.*]] = load float, float* [[LOADADDR1:%.*]], align 4
-; CHECK-NEXT: [[LD2:%.*]] = load float, float* [[LOADADDR2:%.*]], align 4
+; CHECK-NEXT: [[LD1:%.*]] = load float, ptr [[LOADADDR1:%.*]], align 4
+; CHECK-NEXT: [[LD2:%.*]] = load float, ptr [[LOADADDR2:%.*]], align 4
; CHECK-NEXT: [[COND:%.*]] = fcmp ogt float [[LD1]], [[LD2]]
; CHECK-NEXT: [[LD3:%.*]] = select i1 [[COND]], float [[LD1]], float [[LD2]]
-; CHECK-NEXT: store float [[LD3]], float* [[STOREADDR:%.*]], align 4
+; CHECK-NEXT: store float [[LD3]], ptr [[STOREADDR:%.*]], align 4
; CHECK-NEXT: ret void
;
- %ld1 = load float, float* %loadaddr1, align 4
- %ld2 = load float, float* %loadaddr2, align 4
+ %ld1 = load float, ptr %loadaddr1, align 4
+ %ld2 = load float, ptr %loadaddr2, align 4
%cond = fcmp ogt float %ld1, %ld2
- %sel = select i1 %cond, float* %loadaddr1, float* %loadaddr2
- %int_load_addr = bitcast float* %sel to i32*
- %ld = load i32, i32* %int_load_addr, align 4
- %int_store_addr = bitcast float* %storeaddr to i32*
- store i32 %ld, i32* %int_store_addr, align 4
+ %sel = select i1 %cond, ptr %loadaddr1, ptr %loadaddr2
+ %ld = load i32, ptr %sel, align 4
+ store i32 %ld, ptr %storeaddr, align 4
ret void
}
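; Editorial note (a sketch): with opaque pointers the type-punning bitcasts
; vanish, so when both addresses have already been loaded, a load through the
; select of those addresses is replaced by a select of the already-loaded
; values (the minmax case above); otherwise the select-of-pointers load simply
; stays, now without any bitcast:
;   %sel = select i1 %cond, ptr %a, ptr %b
;   %ld  = load i32, ptr %sel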
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
-define float @matching_scalar(<4 x float>* dereferenceable(16) %p) {
+define float @matching_scalar(ptr dereferenceable(16) %p) {
; CHECK-LABEL: @matching_scalar(
-; CHECK-NEXT: [[BC:%.*]] = getelementptr inbounds <4 x float>, <4 x float>* [[P:%.*]], i64 0, i64 0
-; CHECK-NEXT: [[R:%.*]] = load float, float* [[BC]], align 16
+; CHECK-NEXT: [[R:%.*]] = load float, ptr [[P:%.*]], align 16
; CHECK-NEXT: ret float [[R]]
;
- %bc = bitcast <4 x float>* %p to float*
- %r = load float, float* %bc, align 16
+ %r = load float, ptr %p, align 16
ret float %r
}
-define i32 @nonmatching_scalar(<4 x float>* dereferenceable(16) %p) {
+define i32 @nonmatching_scalar(ptr dereferenceable(16) %p) {
; CHECK-LABEL: @nonmatching_scalar(
-; CHECK-NEXT: [[BC:%.*]] = bitcast <4 x float>* [[P:%.*]] to i32*
-; CHECK-NEXT: [[R:%.*]] = load i32, i32* [[BC]], align 16
+; CHECK-NEXT: [[R:%.*]] = load i32, ptr [[P:%.*]], align 16
; CHECK-NEXT: ret i32 [[R]]
;
- %bc = bitcast <4 x float>* %p to i32*
- %r = load i32, i32* %bc, align 16
+ %r = load i32, ptr %p, align 16
ret i32 %r
}
-define i64 @larger_scalar(<4 x float>* dereferenceable(16) %p) {
+define i64 @larger_scalar(ptr dereferenceable(16) %p) {
; CHECK-LABEL: @larger_scalar(
-; CHECK-NEXT: [[BC:%.*]] = bitcast <4 x float>* [[P:%.*]] to i64*
-; CHECK-NEXT: [[R:%.*]] = load i64, i64* [[BC]], align 16
+; CHECK-NEXT: [[R:%.*]] = load i64, ptr [[P:%.*]], align 16
; CHECK-NEXT: ret i64 [[R]]
;
- %bc = bitcast <4 x float>* %p to i64*
- %r = load i64, i64* %bc, align 16
+ %r = load i64, ptr %p, align 16
ret i64 %r
}
-define i8 @smaller_scalar(<4 x float>* dereferenceable(16) %p) {
+define i8 @smaller_scalar(ptr dereferenceable(16) %p) {
; CHECK-LABEL: @smaller_scalar(
-; CHECK-NEXT: [[BC:%.*]] = bitcast <4 x float>* [[P:%.*]] to i8*
-; CHECK-NEXT: [[R:%.*]] = load i8, i8* [[BC]], align 16
+; CHECK-NEXT: [[R:%.*]] = load i8, ptr [[P:%.*]], align 16
; CHECK-NEXT: ret i8 [[R]]
;
- %bc = bitcast <4 x float>* %p to i8*
- %r = load i8, i8* %bc, align 16
+ %r = load i8, ptr %p, align 16
ret i8 %r
}
-define i8 @smaller_scalar_less_aligned(<4 x float>* dereferenceable(16) %p) {
+define i8 @smaller_scalar_less_aligned(ptr dereferenceable(16) %p) {
; CHECK-LABEL: @smaller_scalar_less_aligned(
-; CHECK-NEXT: [[BC:%.*]] = bitcast <4 x float>* [[P:%.*]] to i8*
-; CHECK-NEXT: [[R:%.*]] = load i8, i8* [[BC]], align 4
+; CHECK-NEXT: [[R:%.*]] = load i8, ptr [[P:%.*]], align 4
; CHECK-NEXT: ret i8 [[R]]
;
- %bc = bitcast <4 x float>* %p to i8*
- %r = load i8, i8* %bc, align 4
+ %r = load i8, ptr %p, align 4
ret i8 %r
}
-define float @matching_scalar_small_deref(<4 x float>* dereferenceable(15) %p) {
+define float @matching_scalar_small_deref(ptr dereferenceable(15) %p) {
; CHECK-LABEL: @matching_scalar_small_deref(
-; CHECK-NEXT: [[BC:%.*]] = getelementptr inbounds <4 x float>, <4 x float>* [[P:%.*]], i64 0, i64 0
-; CHECK-NEXT: [[R:%.*]] = load float, float* [[BC]], align 16
+; CHECK-NEXT: [[R:%.*]] = load float, ptr [[P:%.*]], align 16
; CHECK-NEXT: ret float [[R]]
;
- %bc = bitcast <4 x float>* %p to float*
- %r = load float, float* %bc, align 16
+ %r = load float, ptr %p, align 16
ret float %r
}
-define float @matching_scalar_smallest_deref(<4 x float>* dereferenceable(1) %p) {
+define float @matching_scalar_smallest_deref(ptr dereferenceable(1) %p) {
; CHECK-LABEL: @matching_scalar_smallest_deref(
-; CHECK-NEXT: [[BC:%.*]] = getelementptr inbounds <4 x float>, <4 x float>* [[P:%.*]], i64 0, i64 0
-; CHECK-NEXT: [[R:%.*]] = load float, float* [[BC]], align 16
+; CHECK-NEXT: [[R:%.*]] = load float, ptr [[P:%.*]], align 16
; CHECK-NEXT: ret float [[R]]
;
- %bc = bitcast <4 x float>* %p to float*
- %r = load float, float* %bc, align 16
+ %r = load float, ptr %p, align 16
ret float %r
}
-define float @matching_scalar_smallest_deref_or_null(<4 x float>* dereferenceable_or_null(1) %p) {
+define float @matching_scalar_smallest_deref_or_null(ptr dereferenceable_or_null(1) %p) {
; CHECK-LABEL: @matching_scalar_smallest_deref_or_null(
-; CHECK-NEXT: [[BC:%.*]] = getelementptr inbounds <4 x float>, <4 x float>* [[P:%.*]], i64 0, i64 0
-; CHECK-NEXT: [[R:%.*]] = load float, float* [[BC]], align 16
+; CHECK-NEXT: [[R:%.*]] = load float, ptr [[P:%.*]], align 16
; CHECK-NEXT: ret float [[R]]
;
- %bc = bitcast <4 x float>* %p to float*
- %r = load float, float* %bc, align 16
+ %r = load float, ptr %p, align 16
ret float %r
}
-define float @matching_scalar_smallest_deref_addrspace(<4 x float> addrspace(4)* dereferenceable(1) %p) {
+define float @matching_scalar_smallest_deref_addrspace(ptr addrspace(4) dereferenceable(1) %p) {
; CHECK-LABEL: @matching_scalar_smallest_deref_addrspace(
-; CHECK-NEXT: [[BC:%.*]] = getelementptr inbounds <4 x float>, <4 x float> addrspace(4)* [[P:%.*]], i64 0, i64 0
-; CHECK-NEXT: [[R:%.*]] = load float, float addrspace(4)* [[BC]], align 16
+; CHECK-NEXT: [[R:%.*]] = load float, ptr addrspace(4) [[P:%.*]], align 16
; CHECK-NEXT: ret float [[R]]
;
- %bc = bitcast <4 x float> addrspace(4)* %p to float addrspace(4)*
- %r = load float, float addrspace(4)* %bc, align 16
+ %r = load float, ptr addrspace(4) %p, align 16
ret float %r
}
; A null pointer can't be assumed inbounds in a non-default address space.
-define float @matching_scalar_smallest_deref_or_null_addrspace(<4 x float> addrspace(4)* dereferenceable_or_null(1) %p) {
+define float @matching_scalar_smallest_deref_or_null_addrspace(ptr addrspace(4) dereferenceable_or_null(1) %p) {
; CHECK-LABEL: @matching_scalar_smallest_deref_or_null_addrspace(
-; CHECK-NEXT: [[BC:%.*]] = getelementptr <4 x float>, <4 x float> addrspace(4)* [[P:%.*]], i64 0, i64 0
-; CHECK-NEXT: [[R:%.*]] = load float, float addrspace(4)* [[BC]], align 16
+; CHECK-NEXT: [[R:%.*]] = load float, ptr addrspace(4) [[P:%.*]], align 16
; CHECK-NEXT: ret float [[R]]
;
- %bc = bitcast <4 x float> addrspace(4)* %p to float addrspace(4)*
- %r = load float, float addrspace(4)* %bc, align 16
+ %r = load float, ptr addrspace(4) %p, align 16
ret float %r
}
-define float @matching_scalar_volatile(<4 x float>* dereferenceable(16) %p) {
+define float @matching_scalar_volatile(ptr dereferenceable(16) %p) {
; CHECK-LABEL: @matching_scalar_volatile(
-; CHECK-NEXT: [[BC:%.*]] = getelementptr inbounds <4 x float>, <4 x float>* [[P:%.*]], i64 0, i64 0
-; CHECK-NEXT: [[R:%.*]] = load volatile float, float* [[BC]], align 16
+; CHECK-NEXT: [[R:%.*]] = load volatile float, ptr [[P:%.*]], align 16
; CHECK-NEXT: ret float [[R]]
;
- %bc = bitcast <4 x float>* %p to float*
- %r = load volatile float, float* %bc, align 16
+ %r = load volatile float, ptr %p, align 16
ret float %r
}
-define float @nonvector(double* dereferenceable(16) %p) {
+define float @nonvector(ptr dereferenceable(16) %p) {
; CHECK-LABEL: @nonvector(
-; CHECK-NEXT: [[BC:%.*]] = bitcast double* [[P:%.*]] to float*
-; CHECK-NEXT: [[R:%.*]] = load float, float* [[BC]], align 16
+; CHECK-NEXT: [[R:%.*]] = load float, ptr [[P:%.*]], align 16
; CHECK-NEXT: ret float [[R]]
;
- %bc = bitcast double* %p to float*
- %r = load float, float* %bc, align 16
+ %r = load float, ptr %p, align 16
ret float %r
}
target datalayout = "p:32:32:32"
-define i64* @test1(i8* %x) {
+define ptr @test1(ptr %x) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A:%.*]] = bitcast i8* [[X:%.*]] to i64*
-; CHECK-NEXT: [[B:%.*]] = load i64, i64* [[A]], align 4
+; CHECK-NEXT: [[B:%.*]] = load i64, ptr [[X:%.*]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[B]] to i32
-; CHECK-NEXT: [[C:%.*]] = inttoptr i32 [[TMP0]] to i64*
-; CHECK-NEXT: ret i64* [[C]]
+; CHECK-NEXT: [[C:%.*]] = inttoptr i32 [[TMP0]] to ptr
+; CHECK-NEXT: ret ptr [[C]]
;
entry:
- %a = bitcast i8* %x to i64*
- %b = load i64, i64* %a
- %c = inttoptr i64 %b to i64*
+ %b = load i64, ptr %x
+ %c = inttoptr i64 %b to ptr
- ret i64* %c
+ ret ptr %c
}
-define i32* @test2(i8* %x) {
+define ptr @test2(ptr %x) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A:%.*]] = bitcast i8* [[X:%.*]] to i32*
-; CHECK-NEXT: [[B:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT: [[C:%.*]] = inttoptr i32 [[B]] to i32*
-; CHECK-NEXT: ret i32* [[C]]
+; CHECK-NEXT: [[B:%.*]] = load i32, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[C:%.*]] = inttoptr i32 [[B]] to ptr
+; CHECK-NEXT: ret ptr [[C]]
;
entry:
- %a = bitcast i8* %x to i32*
- %b = load i32, i32* %a
- %c = inttoptr i32 %b to i32*
+ %b = load i32, ptr %x
+ %c = inttoptr i32 %b to ptr
- ret i32* %c
+ ret ptr %c
}
-define i64* @test3(i8* %x) {
+define ptr @test3(ptr %x) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A:%.*]] = bitcast i8* [[X:%.*]] to i32*
-; CHECK-NEXT: [[B:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT: [[C:%.*]] = inttoptr i32 [[B]] to i64*
-; CHECK-NEXT: ret i64* [[C]]
+; CHECK-NEXT: [[B:%.*]] = load i32, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[C:%.*]] = inttoptr i32 [[B]] to ptr
+; CHECK-NEXT: ret ptr [[C]]
;
entry:
- %a = bitcast i8* %x to i32*
- %b = load i32, i32* %a
- %c = inttoptr i32 %b to i64*
+ %b = load i32, ptr %x
+ %c = inttoptr i32 %b to ptr
- ret i64* %c
+ ret ptr %c
}
-define i64 @test4(i8* %x) {
+define i64 @test4(ptr %x) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A:%.*]] = bitcast i8* [[X:%.*]] to i64**
-; CHECK-NEXT: [[B:%.*]] = load i64*, i64** [[A]], align 4
-; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint i64* [[B]] to i32
+; CHECK-NEXT: [[B:%.*]] = load ptr, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[B]] to i32
; CHECK-NEXT: [[C:%.*]] = zext i32 [[TMP0]] to i64
; CHECK-NEXT: ret i64 [[C]]
;
entry:
- %a = bitcast i8* %x to i64**
- %b = load i64*, i64** %a
- %c = ptrtoint i64* %b to i64
+ %b = load ptr, ptr %x
+ %c = ptrtoint ptr %b to i64
ret i64 %c
}
-define i32 @test5(i8* %x) {
+define i32 @test5(ptr %x) {
; CHECK-LABEL: @test5(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A:%.*]] = bitcast i8* [[X:%.*]] to i32**
-; CHECK-NEXT: [[B:%.*]] = load i32*, i32** [[A]], align 4
-; CHECK-NEXT: [[C:%.*]] = ptrtoint i32* [[B]] to i32
+; CHECK-NEXT: [[B:%.*]] = load ptr, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[C:%.*]] = ptrtoint ptr [[B]] to i32
; CHECK-NEXT: ret i32 [[C]]
;
entry:
- %a = bitcast i8* %x to i32**
- %b = load i32*, i32** %a
- %c = ptrtoint i32* %b to i32
+ %b = load ptr, ptr %x
+ %c = ptrtoint ptr %b to i32
ret i32 %c
}
-define i64 @test6(i8* %x) {
+define i64 @test6(ptr %x) {
; CHECK-LABEL: @test6(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A:%.*]] = bitcast i8* [[X:%.*]] to i32**
-; CHECK-NEXT: [[B:%.*]] = load i32*, i32** [[A]], align 4
-; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint i32* [[B]] to i32
+; CHECK-NEXT: [[B:%.*]] = load ptr, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[B]] to i32
; CHECK-NEXT: [[C:%.*]] = zext i32 [[TMP0]] to i64
; CHECK-NEXT: ret i64 [[C]]
;
entry:
- %a = bitcast i8* %x to i32**
- %b = load i32*, i32** %a
- %c = ptrtoint i32* %b to i64
+ %b = load ptr, ptr %x
+ %c = ptrtoint ptr %b to i64
ret i64 %c
}
target datalayout = "p:64:64:64-i64:32:32"
-define i64* @test1(i8* %x) {
+define ptr @test1(ptr %x) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A:%.*]] = bitcast i8* [[X:%.*]] to i64*
-; CHECK-NEXT: [[B:%.*]] = load i64, i64* [[A]], align 4
-; CHECK-NEXT: [[C:%.*]] = inttoptr i64 [[B]] to i64*
-; CHECK-NEXT: ret i64* [[C]]
+; CHECK-NEXT: [[B:%.*]] = load i64, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[C:%.*]] = inttoptr i64 [[B]] to ptr
+; CHECK-NEXT: ret ptr [[C]]
;
entry:
- %a = bitcast i8* %x to i64*
- %b = load i64, i64* %a
- %c = inttoptr i64 %b to i64*
+ %b = load i64, ptr %x
+ %c = inttoptr i64 %b to ptr
- ret i64* %c
+ ret ptr %c
}
-define i32* @test2(i8* %x) {
+define ptr @test2(ptr %x) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A:%.*]] = bitcast i8* [[X:%.*]] to i32*
-; CHECK-NEXT: [[B:%.*]] = load i32, i32* [[A]], align 4
+; CHECK-NEXT: [[B:%.*]] = load i32, ptr [[X:%.*]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[B]] to i64
-; CHECK-NEXT: [[C:%.*]] = inttoptr i64 [[TMP0]] to i32*
-; CHECK-NEXT: ret i32* [[C]]
+; CHECK-NEXT: [[C:%.*]] = inttoptr i64 [[TMP0]] to ptr
+; CHECK-NEXT: ret ptr [[C]]
;
entry:
- %a = bitcast i8* %x to i32*
- %b = load i32, i32* %a
- %c = inttoptr i32 %b to i32*
+ %b = load i32, ptr %x
+ %c = inttoptr i32 %b to ptr
- ret i32* %c
+ ret ptr %c
}
-define i64* @test3(i8* %x) {
+define ptr @test3(ptr %x) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A:%.*]] = bitcast i8* [[X:%.*]] to i32*
-; CHECK-NEXT: [[B:%.*]] = load i32, i32* [[A]], align 4
+; CHECK-NEXT: [[B:%.*]] = load i32, ptr [[X:%.*]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[B]] to i64
-; CHECK-NEXT: [[C:%.*]] = inttoptr i64 [[TMP0]] to i64*
-; CHECK-NEXT: ret i64* [[C]]
+; CHECK-NEXT: [[C:%.*]] = inttoptr i64 [[TMP0]] to ptr
+; CHECK-NEXT: ret ptr [[C]]
;
entry:
- %a = bitcast i8* %x to i32*
- %b = load i32, i32* %a
- %c = inttoptr i32 %b to i64*
+ %b = load i32, ptr %x
+ %c = inttoptr i32 %b to ptr
- ret i64* %c
+ ret ptr %c
}
-define i64 @test4(i8* %x) {
+define i64 @test4(ptr %x) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A:%.*]] = bitcast i8* [[X:%.*]] to i64**
-; CHECK-NEXT: [[B:%.*]] = load i64*, i64** [[A]], align 8
-; CHECK-NEXT: [[C:%.*]] = ptrtoint i64* [[B]] to i64
+; CHECK-NEXT: [[B:%.*]] = load ptr, ptr [[X:%.*]], align 8
+; CHECK-NEXT: [[C:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT: ret i64 [[C]]
;
entry:
- %a = bitcast i8* %x to i64**
- %b = load i64*, i64** %a
- %c = ptrtoint i64* %b to i64
+ %b = load ptr, ptr %x
+ %c = ptrtoint ptr %b to i64
ret i64 %c
}
-define i32 @test5(i8* %x) {
+define i32 @test5(ptr %x) {
; CHECK-LABEL: @test5(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A:%.*]] = bitcast i8* [[X:%.*]] to i32**
-; CHECK-NEXT: [[B:%.*]] = load i32*, i32** [[A]], align 8
-; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint i32* [[B]] to i64
+; CHECK-NEXT: [[B:%.*]] = load ptr, ptr [[X:%.*]], align 8
+; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT: [[C:%.*]] = trunc i64 [[TMP0]] to i32
; CHECK-NEXT: ret i32 [[C]]
;
entry:
- %a = bitcast i8* %x to i32**
- %b = load i32*, i32** %a
- %c = ptrtoint i32* %b to i32
+ %b = load ptr, ptr %x
+ %c = ptrtoint ptr %b to i32
ret i32 %c
}
-define i64 @test6(i8* %x) {
+define i64 @test6(ptr %x) {
; CHECK-LABEL: @test6(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A:%.*]] = bitcast i8* [[X:%.*]] to i32**
-; CHECK-NEXT: [[B:%.*]] = load i32*, i32** [[A]], align 8
-; CHECK-NEXT: [[C:%.*]] = ptrtoint i32* [[B]] to i64
+; CHECK-NEXT: [[B:%.*]] = load ptr, ptr [[X:%.*]], align 8
+; CHECK-NEXT: [[C:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT: ret i64 [[C]]
;
entry:
- %a = bitcast i8* %x to i32**
- %b = load i32*, i32** %a
- %c = ptrtoint i32* %b to i64
+ %b = load ptr, ptr %x
+ %c = ptrtoint ptr %b to i64
ret i64 %c
}
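; Editorial note for the load-cmp tests below: the globals involved, such as
; @G16 and @GD (their definitions are elided from this diff), are constant
; arrays, so a compare of a value loaded at index %X folds into a direct
; compare or range/bit test on the index itself; the CHECK lines accordingly
; contain no load at all.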
; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[X:%.*]], 9
; CHECK-NEXT: ret i1 [[R]]
;
- %P = getelementptr inbounds [10 x i16], [10 x i16]* @G16, i32 0, i32 %X
- %Q = load i16, i16* %P
+ %P = getelementptr inbounds [10 x i16], ptr @G16, i32 0, i32 %X
+ %Q = load i16, ptr %P
%R = icmp eq i16 %Q, 0
ret i1 %R
}
; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[TMP1]], 9
; CHECK-NEXT: ret i1 [[R]]
;
- %P = getelementptr [10 x i16], [10 x i16]* @G16, i32 0, i32 %X
- %Q = load i16, i16* %P
+ %P = getelementptr [10 x i16], ptr @G16, i32 0, i32 %X
+ %Q = load i16, ptr %P
%R = icmp eq i16 %Q, 0
ret i1 %R
}
; CHECK-NEXT: [[R:%.*]] = icmp eq i64 [[TMP1]], 9
; CHECK-NEXT: ret i1 [[R]]
;
- %P = getelementptr [10 x i16], [10 x i16]* @G16, i64 0, i64 %X
- %Q = load i16, i16* %P
+ %P = getelementptr [10 x i16], ptr @G16, i64 0, i64 %X
+ %Q = load i16, ptr %P
%R = icmp eq i16 %Q, 0
ret i1 %R
}
; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[TMP1]], 9
; CHECK-NEXT: ret i1 [[R]]
;
- %p = getelementptr [10 x i16], [10 x i16] addrspace(1)* @G16_as1, i16 0, i32 %x
- %q = load i16, i16 addrspace(1)* %p
+ %p = getelementptr [10 x i16], ptr addrspace(1) @G16_as1, i16 0, i32 %x
+ %q = load i16, ptr addrspace(1) %p
%r = icmp eq i16 %q, 0
ret i1 %r
; CHECK-NEXT: [[R:%.*]] = icmp ne i32 [[X:%.*]], 4
; CHECK-NEXT: ret i1 [[R]]
;
- %P = getelementptr inbounds [10 x i16], [10 x i16]* @G16, i32 0, i32 %X
- %Q = load i16, i16* %P
+ %P = getelementptr inbounds [10 x i16], ptr @G16, i32 0, i32 %X
+ %Q = load i16, ptr %P
%R = icmp slt i16 %Q, 85
ret i1 %R
}
; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[X:%.*]], 1
; CHECK-NEXT: ret i1 [[R]]
;
- %P = getelementptr inbounds [6 x double], [6 x double]* @GD, i32 0, i32 %X
- %Q = load double, double* %P
+ %P = getelementptr inbounds [6 x double], ptr @GD, i32 0, i32 %X
+ %Q = load double, ptr %P
%R = fcmp oeq double %Q, 1.0
ret i1 %R
; CHECK-NEXT: [[R:%.*]] = icmp ne i32 [[TMP2]], 0
; CHECK-NEXT: ret i1 [[R]]
;
- %P = getelementptr inbounds [10 x i16], [10 x i16]* @G16, i32 0, i32 %X
- %Q = load i16, i16* %P
+ %P = getelementptr inbounds [10 x i16], ptr @G16, i32 0, i32 %X
+ %Q = load i16, ptr %P
%R = icmp sle i16 %Q, 73
ret i1 %R
}
; CHECK-NEXT: [[R:%.*]] = icmp ne i32 [[TMP3]], 0
; CHECK-NEXT: ret i1 [[R]]
;
- %P = getelementptr inbounds [10 x i16], [10 x i16]* @G16, i32 0, i16 %X
- %Q = load i16, i16* %P
+ %P = getelementptr inbounds [10 x i16], ptr @G16, i32 0, i16 %X
+ %Q = load i16, ptr %P
%R = icmp sle i16 %Q, 73
ret i1 %R
}
; CHECK-NEXT: [[R:%.*]] = or i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret i1 [[R]]
;
- %P = getelementptr inbounds [10 x i16], [10 x i16]* @G16, i32 0, i32 %X
- %Q = load i16, i16* %P
+ %P = getelementptr inbounds [10 x i16], ptr @G16, i32 0, i32 %X
+ %Q = load i16, ptr %P
%R = icmp eq i16 %Q, 69
ret i1 %R
}
; CHECK-NEXT: [[R:%.*]] = icmp ult i32 [[TMP1]], 3
; CHECK-NEXT: ret i1 [[R]]
;
- %P = getelementptr inbounds [6 x double], [6 x double]* @GD, i32 0, i32 %X
- %Q = load double, double* %P
+ %P = getelementptr inbounds [6 x double], ptr @GD, i32 0, i32 %X
+ %Q = load double, ptr %P
%R = fcmp ogt double %Q, 0.0
ret i1 %R
}
; CHECK-NEXT: [[R:%.*]] = icmp ult i32 [[TMP1]], -3
; CHECK-NEXT: ret i1 [[R]]
;
- %P = getelementptr inbounds [6 x double], [6 x double]* @GD, i32 0, i32 %X
- %Q = load double, double* %P
+ %P = getelementptr inbounds [6 x double], ptr @GD, i32 0, i32 %X
+ %Q = load double, ptr %P
%R = fcmp olt double %Q, 0.0
ret i1 %R
}
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 8
; CHECK-NEXT: ret i1 [[TMP2]]
;
- %P = getelementptr inbounds [10 x i16], [10 x i16]* @G16, i32 0, i32 %X
- %Q = load i16, i16* %P
+ %P = getelementptr inbounds [10 x i16], ptr @G16, i32 0, i32 %X
+ %Q = load i16, ptr %P
%R = and i16 %Q, 3
%S = icmp eq i16 %R, 0
ret i1 %S
; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i32 [[TMP1]], 2
; CHECK-NEXT: ret i1 [[TMP2]]
;
- %P = getelementptr inbounds [4 x { i32, i32 } ], [4 x { i32, i32 } ]* @GA, i32 0, i32 %X, i32 1
- %Q = load i32, i32* %P
+ %P = getelementptr inbounds [4 x { i32, i32 } ], ptr @GA, i32 0, i32 %X, i32 1
+ %Q = load i32, ptr %P
%R = icmp eq i32 %Q, 1
ret i1 %R
}
; CHECK-LABEL: @test10_struct(
; CHECK-NEXT: ret i1 false
;
- %p = getelementptr inbounds %Foo, %Foo* @GS, i32 %x, i32 0
- %q = load i32, i32* %p
+ %p = getelementptr inbounds %Foo, ptr @GS, i32 %x, i32 0
+ %q = load i32, ptr %p
%r = icmp eq i32 %q, 9
ret i1 %r
}
define i1 @test10_struct_noinbounds(i32 %x) {
; CHECK-LABEL: @test10_struct_noinbounds(
-; CHECK-NEXT: [[P:%.*]] = getelementptr [[FOO:%.*]], %Foo* @GS, i32 [[X:%.*]], i32 0
-; CHECK-NEXT: [[Q:%.*]] = load i32, i32* [[P]], align 8
+; CHECK-NEXT: [[P:%.*]] = getelementptr [[FOO:%.*]], ptr @GS, i32 [[X:%.*]], i32 0
+; CHECK-NEXT: [[Q:%.*]] = load i32, ptr [[P]], align 8
; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[Q]], 9
; CHECK-NEXT: ret i1 [[R]]
;
- %p = getelementptr %Foo, %Foo* @GS, i32 %x, i32 0
- %q = load i32, i32* %p
+ %p = getelementptr %Foo, ptr @GS, i32 %x, i32 0
+ %q = load i32, ptr %p
%r = icmp eq i32 %q, 9
ret i1 %r
}
; CHECK-LABEL: @test10_struct_i16(
; CHECK-NEXT: ret i1 false
;
- %p = getelementptr inbounds %Foo, %Foo* @GS, i16 %x, i32 0
- %q = load i32, i32* %p
+ %p = getelementptr inbounds %Foo, ptr @GS, i16 %x, i32 0
+ %q = load i32, ptr %p
%r = icmp eq i32 %q, 0
ret i1 %r
}
; CHECK-LABEL: @test10_struct_i64(
; CHECK-NEXT: ret i1 false
;
- %p = getelementptr inbounds %Foo, %Foo* @GS, i64 %x, i32 0
- %q = load i32, i32* %p
+ %p = getelementptr inbounds %Foo, ptr @GS, i64 %x, i32 0
+ %q = load i32, ptr %p
%r = icmp eq i32 %q, 0
ret i1 %r
}
define i1 @test10_struct_noinbounds_i16(i16 %x) {
; CHECK-LABEL: @test10_struct_noinbounds_i16(
; CHECK-NEXT: [[TMP1:%.*]] = sext i16 [[X:%.*]] to i32
-; CHECK-NEXT: [[P:%.*]] = getelementptr [[FOO:%.*]], %Foo* @GS, i32 [[TMP1]], i32 0
-; CHECK-NEXT: [[Q:%.*]] = load i32, i32* [[P]], align 8
+; CHECK-NEXT: [[P:%.*]] = getelementptr [[FOO:%.*]], ptr @GS, i32 [[TMP1]], i32 0
+; CHECK-NEXT: [[Q:%.*]] = load i32, ptr [[P]], align 8
; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[Q]], 0
; CHECK-NEXT: ret i1 [[R]]
;
- %p = getelementptr %Foo, %Foo* @GS, i16 %x, i32 0
- %q = load i32, i32* %p
+ %p = getelementptr %Foo, ptr @GS, i16 %x, i32 0
+ %q = load i32, ptr %p
%r = icmp eq i32 %q, 0
ret i1 %r
}
; CHECK-NEXT: [[R:%.*]] = icmp ne i32 [[X:%.*]], 1
; CHECK-NEXT: ret i1 [[R]]
;
- %p = getelementptr inbounds [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
- %q = load i32, i32* %p
+ %p = getelementptr inbounds [4 x %Foo], ptr @GStructArr, i32 0, i32 %x, i32 2
+ %q = load i32, ptr %p
%r = icmp eq i32 %q, 9
ret i1 %r
}
; CHECK-NEXT: [[R:%.*]] = icmp ne i32 [[TMP1]], 1
; CHECK-NEXT: ret i1 [[R]]
;
- %p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
- %q = load i32, i32* %p
+ %p = getelementptr [4 x %Foo], ptr @GStructArr, i32 0, i32 %x, i32 2
+ %q = load i32, ptr %p
%r = icmp eq i32 %q, 9
ret i1 %r
}
; CHECK-NEXT: [[R:%.*]] = icmp ne i16 [[X:%.*]], 1
; CHECK-NEXT: ret i1 [[R]]
;
- %p = getelementptr inbounds [4 x %Foo], [4 x %Foo]* @GStructArr, i16 0, i16 %x, i32 2
- %q = load i32, i32* %p
+ %p = getelementptr inbounds [4 x %Foo], ptr @GStructArr, i16 0, i16 %x, i32 2
+ %q = load i32, ptr %p
%r = icmp eq i32 %q, 9
ret i1 %r
}
; CHECK-NEXT: [[R:%.*]] = icmp ne i64 [[TMP1]], 1
; CHECK-NEXT: ret i1 [[R]]
;
- %p = getelementptr inbounds [4 x %Foo], [4 x %Foo]* @GStructArr, i64 0, i64 %x, i32 2
- %q = load i32, i32* %p
+ %p = getelementptr inbounds [4 x %Foo], ptr @GStructArr, i64 0, i64 %x, i32 2
+ %q = load i32, ptr %p
%r = icmp eq i32 %q, 9
ret i1 %r
}
; CHECK-NEXT: [[R:%.*]] = icmp ne i32 [[TMP2]], 1
; CHECK-NEXT: ret i1 [[R]]
;
- %p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i16 %x, i32 2
- %q = load i32, i32* %p
+ %p = getelementptr [4 x %Foo], ptr @GStructArr, i32 0, i16 %x, i32 2
+ %q = load i32, ptr %p
%r = icmp eq i32 %q, 9
ret i1 %r
}
; CHECK-NEXT: [[R:%.*]] = icmp ne i64 [[TMP1]], 1
; CHECK-NEXT: ret i1 [[R]]
;
- %p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i64 %x, i32 2
- %q = load i32, i32* %p
+ %p = getelementptr [4 x %Foo], ptr @GStructArr, i32 0, i64 %x, i32 2
+ %q = load i32, ptr %p
%r = icmp eq i32 %q, 9
ret i1 %r
}
; CHECK-LABEL: @test_load_load_combine_metadata(
; Check that align metadata is combined
-; CHECK: load i32*, i32** %0
+; CHECK: load ptr, ptr %0
; CHECK-SAME: !align ![[ALIGN:[0-9]+]]
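; (The merged load keeps a single !align hint; when the two annotations disagree,
; the combiner presumably keeps the more conservative of the two values.)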
-define void @test_load_load_combine_metadata(i32**, i32**, i32**) {
- %a = load i32*, i32** %0, !align !0
- %b = load i32*, i32** %0, !align !1
- store i32 0, i32* %a
- store i32 0, i32* %b
+define void @test_load_load_combine_metadata(ptr, ptr, ptr) {
+ %a = load ptr, ptr %0, !align !0
+ %b = load ptr, ptr %0, !align !1
+ store i32 0, ptr %a
+ store i32 0, ptr %b
ret void
}
; CHECK-LABEL: @test_load_load_combine_metadata(
; Check that dereferenceable metadata is combined
-; CHECK: load i32*, i32** %0
+; CHECK: load ptr, ptr %0
; CHECK-SAME: !dereferenceable ![[DEREF:[0-9]+]]
-define void @test_load_load_combine_metadata(i32**, i32**, i32**) {
- %a = load i32*, i32** %0, !dereferenceable !0
- %b = load i32*, i32** %0, !dereferenceable !1
- store i32 0, i32* %a
- store i32 0, i32* %b
+define void @test_load_load_combine_metadata(ptr, ptr, ptr) {
+ %a = load ptr, ptr %0, !dereferenceable !0
+ %b = load ptr, ptr %0, !dereferenceable !1
+ store i32 0, ptr %a
+ store i32 0, ptr %b
ret void
}
; CHECK-LABEL: @test_load_load_combine_metadata(
; Check that dereferenceable_or_null metadata is combined
-; CHECK: load i32*, i32** %0
+; CHECK: load ptr, ptr %0
; CHECK-SAME: !dereferenceable_or_null ![[DEREF:[0-9]+]]
-define void @test_load_load_combine_metadata(i32**, i32**, i32**) {
- %a = load i32*, i32** %0, !dereferenceable_or_null !0
- %b = load i32*, i32** %0, !dereferenceable_or_null !1
- store i32 0, i32* %a
- store i32 0, i32* %b
+define void @test_load_load_combine_metadata(ptr, ptr, ptr) {
+ %a = load ptr, ptr %0, !dereferenceable_or_null !0
+ %b = load ptr, ptr %0, !dereferenceable_or_null !1
+ store i32 0, ptr %a
+ store i32 0, ptr %b
ret void
}
; Check that nonnull metadata is propagated from dominating load.
; CHECK-LABEL: @combine_metadata_dominance1(
; CHECK-LABEL: bb1:
-; CHECK: load i32*, i32** %p, align 8, !nonnull !0
-; CHECK-NOT: load i32*, i32** %p
-define void @combine_metadata_dominance1(i32** %p) {
+; CHECK: load ptr, ptr %p, align 8, !nonnull !0
+; CHECK-NOT: load ptr, ptr %p
+define void @combine_metadata_dominance1(ptr %p) {
entry:
- %a = load i32*, i32** %p, !nonnull !0
+ %a = load ptr, ptr %p, !nonnull !0
br label %bb1
bb1:
- %b = load i32*, i32** %p
- store i32 0, i32* %a
- store i32 0, i32* %b
+ %b = load ptr, ptr %p
+ store i32 0, ptr %a
+ store i32 0, ptr %b
ret void
}
-declare i32 @use(i32*, i32) readonly
+declare i32 @use(ptr, i32) readonly
; Check that nonnull from the dominated load does not get propagated.
; There are some cases where it would be safe to keep it.
; CHECK-LABEL: @combine_metadata_dominance2(
; CHECK-NOT: nonnull
-define void @combine_metadata_dominance2(i32** %p, i1 %c1) {
+define void @combine_metadata_dominance2(ptr %p, i1 %c1) {
entry:
- %a = load i32*, i32** %p
+ %a = load ptr, ptr %p
br i1 %c1, label %bb1, label %bb2
bb1:
- %b = load i32*, i32** %p, !nonnull !0
- store i32 0, i32* %a
- store i32 0, i32* %b
+ %b = load ptr, ptr %p, !nonnull !0
+ store i32 0, ptr %a
+ store i32 0, ptr %b
ret void
bb2:
; CHECK-LABEL: @test_load_load_combine_metadata(
; Check that range and AA metadata is combined
-; CHECK: %[[V:.*]] = load i32, i32* %0
+; CHECK: %[[V:.*]] = load i32, ptr %0
; CHECK-SAME: !tbaa !{{[0-9]+}}
; CHECK-SAME: !range ![[RANGE:[0-9]+]]
-; CHECK: store i32 %[[V]], i32* %1
-; CHECK: store i32 %[[V]], i32* %2
-define void @test_load_load_combine_metadata(i32*, i32*, i32*) {
- %a = load i32, i32* %0, !tbaa !8, !range !0, !alias.scope !5, !noalias !6
- %b = load i32, i32* %0, !tbaa !8, !range !1
- store i32 %a, i32* %1
- store i32 %b, i32* %2
+; CHECK: store i32 %[[V]], ptr %1
+; CHECK: store i32 %[[V]], ptr %2
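; (The surviving !range annotation presumably covers the union of the two input
; ranges, so it remains correct regardless of which load produced the value.)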
+define void @test_load_load_combine_metadata(ptr, ptr, ptr) {
+ %a = load i32, ptr %0, !tbaa !8, !range !0, !alias.scope !5, !noalias !6
+ %b = load i32, ptr %0, !tbaa !8, !range !1
+ store i32 %a, ptr %1
+ store i32 %b, ptr %2
ret void
}
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
; Check that load-to-load forwarding works with a non-aliasing store in between.
-define i32 @test_load_store_load_combine(i32*, float*) {
+define i32 @test_load_store_load_combine(ptr, ptr) {
; CHECK-LABEL: @test_load_store_load_combine(
-; CHECK-NEXT: [[A:%.*]] = load i32, i32* [[TMP0:%.*]], align 4, !tbaa [[TBAA0:![0-9]+]]
+; CHECK-NEXT: [[A:%.*]] = load i32, ptr [[TMP0:%.*]], align 4, !tbaa [[TBAA0:![0-9]+]]
; CHECK-NEXT: [[F:%.*]] = sitofp i32 [[A]] to float
-; CHECK-NEXT: store float [[F]], float* [[TMP1:%.*]], align 4, !tbaa [[TBAA4:![0-9]+]]
+; CHECK-NEXT: store float [[F]], ptr [[TMP1:%.*]], align 4, !tbaa [[TBAA4:![0-9]+]]
; CHECK-NEXT: ret i32 [[A]]
;
- %a = load i32, i32* %0, align 4, !tbaa !0
+ %a = load i32, ptr %0, align 4, !tbaa !0
%f = sitofp i32 %a to float
- store float %f, float* %1, align 4, !tbaa !4
- %b = load i32, i32* %0, align 4, !tbaa !0
+ store float %f, ptr %1, align 4, !tbaa !4
+ %b = load i32, ptr %0, align 4, !tbaa !0
ret i32 %b
}
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
-@a = constant [2 x i32] [i32 3, i32 6] ; <[2 x i32]*> [#uses=2]
+@a = constant [2 x i32] [i32 3, i32 6] ; <ptr> [#uses=2]
define i32 @b(i32 %y) nounwind readonly {
; CHECK-LABEL: @b(
; CHECK: ret i32
entry:
%0 = icmp eq i32 %y, 0 ; <i1> [#uses=1]
- %storemerge = select i1 %0, i32* getelementptr inbounds ([2 x i32], [2 x i32]* @a, i32 0, i32 1), i32* getelementptr inbounds ([2 x i32], [2 x i32]* @a, i32 0, i32 0) ; <i32*> [#uses=1]
- %1 = load i32, i32* %storemerge, align 4 ; <i32> [#uses=1]
+ %storemerge = select i1 %0, ptr getelementptr inbounds ([2 x i32], ptr @a, i32 0, i32 1), ptr @a ; <ptr> [#uses=1]
+ %1 = load i32, ptr %storemerge, align 4 ; <i32> [#uses=1]
ret i32 %1
}
; RUN: opt -S -passes=instcombine < %s | FileCheck %s --check-prefixes=CHECK,LITTLE
; RUN: opt -S -passes=instcombine -data-layout="E" < %s | FileCheck %s --check-prefixes=CHECK,BIG
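; 258 is 0x0102, so an i8 load of the first byte sees the low byte (2) on a
; little-endian target and the high byte (1) on a big-endian target.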
-define i8 @load_smaller_int(i16* %p) {
+define i8 @load_smaller_int(ptr %p) {
; LITTLE-LABEL: @load_smaller_int(
-; LITTLE-NEXT: store i16 258, i16* [[P:%.*]], align 2
+; LITTLE-NEXT: store i16 258, ptr [[P:%.*]], align 2
; LITTLE-NEXT: ret i8 2
;
; BIG-LABEL: @load_smaller_int(
-; BIG-NEXT: store i16 258, i16* [[P:%.*]], align 2
+; BIG-NEXT: store i16 258, ptr [[P:%.*]], align 2
; BIG-NEXT: ret i8 1
;
- store i16 258, i16* %p
- %p2 = bitcast i16* %p to i8*
- %load = load i8, i8* %p2
+ store i16 258, ptr %p
+ %load = load i8, ptr %p
ret i8 %load
}
; This case can *not* be forwarded, as we only see part of the stored value.
-define i32 @load_larger_int(i16* %p) {
+define i32 @load_larger_int(ptr %p) {
; CHECK-LABEL: @load_larger_int(
-; CHECK-NEXT: store i16 258, i16* [[P:%.*]], align 2
-; CHECK-NEXT: [[P2:%.*]] = bitcast i16* [[P]] to i32*
-; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[P2]], align 4
+; CHECK-NEXT: store i16 258, ptr [[P:%.*]], align 2
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NEXT: ret i32 [[LOAD]]
;
- store i16 258, i16* %p
- %p2 = bitcast i16* %p to i32*
- %load = load i32, i32* %p2
+ store i16 258, ptr %p
+ %load = load i32, ptr %p
ret i32 %load
}
-define i32 @vec_store_load_first(i32* %p) {
+define i32 @vec_store_load_first(ptr %p) {
; CHECK-LABEL: @vec_store_load_first(
-; CHECK-NEXT: [[P2:%.*]] = bitcast i32* [[P:%.*]] to <2 x i32>*
-; CHECK-NEXT: store <2 x i32> <i32 1, i32 2>, <2 x i32>* [[P2]], align 8
+; CHECK-NEXT: store <2 x i32> <i32 1, i32 2>, ptr [[P:%.*]], align 8
; CHECK-NEXT: ret i32 1
;
- %p2 = bitcast i32* %p to <2 x i32>*
- store <2 x i32> <i32 1, i32 2>, <2 x i32>* %p2
- %load = load i32, i32* %p
+ store <2 x i32> <i32 1, i32 2>, ptr %p
+ %load = load i32, ptr %p
ret i32 %load
}
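; Unlike the i32 case above, i17 is not a byte-sized type, so the load below is
; presumably left in place rather than folded to the first vector element.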
-define i17 @vec_store_load_first_odd_size(i17* %p) {
+define i17 @vec_store_load_first_odd_size(ptr %p) {
; CHECK-LABEL: @vec_store_load_first_odd_size(
-; CHECK-NEXT: [[P2:%.*]] = bitcast i17* [[P:%.*]] to <2 x i17>*
-; CHECK-NEXT: store <2 x i17> <i17 1, i17 2>, <2 x i17>* [[P2]], align 8
-; CHECK-NEXT: [[LOAD:%.*]] = load i17, i17* [[P]], align 4
+; CHECK-NEXT: store <2 x i17> <i17 1, i17 2>, ptr [[P:%.*]], align 8
+; CHECK-NEXT: [[LOAD:%.*]] = load i17, ptr [[P]], align 4
; CHECK-NEXT: ret i17 [[LOAD]]
;
- %p2 = bitcast i17* %p to <2 x i17>*
- store <2 x i17> <i17 1, i17 2>, <2 x i17>* %p2
- %load = load i17, i17* %p
+ store <2 x i17> <i17 1, i17 2>, ptr %p
+ %load = load i17, ptr %p
ret i17 %load
}
-define i32 @vec_store_load_first_constexpr(i32* %p) {
+define i32 @vec_store_load_first_constexpr(ptr %p) {
; CHECK-LABEL: @vec_store_load_first_constexpr(
-; CHECK-NEXT: [[P2:%.*]] = bitcast i32* [[P:%.*]] to <2 x i32>*
-; CHECK-NEXT: store <2 x i32> bitcast (i64 ptrtoint (i32 (i32*)* @vec_store_load_first to i64) to <2 x i32>), <2 x i32>* [[P2]], align 8
-; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT: store <2 x i32> bitcast (i64 ptrtoint (ptr @vec_store_load_first to i64) to <2 x i32>), ptr [[P:%.*]], align 8
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NEXT: ret i32 [[LOAD]]
;
- %p2 = bitcast i32* %p to <2 x i32>*
- store <2 x i32> bitcast (i64 ptrtoint (i32 (i32*)* @vec_store_load_first to i64) to <2 x i32>), <2 x i32>* %p2, align 8
- %load = load i32, i32* %p, align 4
+ store <2 x i32> bitcast (i64 ptrtoint (ptr @vec_store_load_first to i64) to <2 x i32>), ptr %p, align 8
+ %load = load i32, ptr %p, align 4
ret i32 %load
}
-define i32 @vec_store_load_second(i32* %p) {
+define i32 @vec_store_load_second(ptr %p) {
; CHECK-LABEL: @vec_store_load_second(
-; CHECK-NEXT: [[P2:%.*]] = bitcast i32* [[P:%.*]] to <2 x i32>*
-; CHECK-NEXT: store <2 x i32> <i32 1, i32 2>, <2 x i32>* [[P2]], align 8
-; CHECK-NEXT: [[P3:%.*]] = getelementptr i32, i32* [[P]], i64 1
-; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[P3]], align 4
+; CHECK-NEXT: store <2 x i32> <i32 1, i32 2>, ptr [[P:%.*]], align 8
+; CHECK-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P]], i64 1
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[P3]], align 4
; CHECK-NEXT: ret i32 [[LOAD]]
;
- %p2 = bitcast i32* %p to <2 x i32>*
- store <2 x i32> <i32 1, i32 2>, <2 x i32>* %p2
- %p3 = getelementptr i32, i32* %p, i64 1
- %load = load i32, i32* %p3
+ store <2 x i32> <i32 1, i32 2>, ptr %p
+ %p3 = getelementptr i32, ptr %p, i64 1
+ %load = load i32, ptr %p3
ret i32 %load
}
-define i64 @vec_store_load_whole(i32* %p) {
+define i64 @vec_store_load_whole(ptr %p) {
; LITTLE-LABEL: @vec_store_load_whole(
-; LITTLE-NEXT: [[P2:%.*]] = bitcast i32* [[P:%.*]] to <2 x i32>*
-; LITTLE-NEXT: store <2 x i32> <i32 1, i32 2>, <2 x i32>* [[P2]], align 8
+; LITTLE-NEXT: store <2 x i32> <i32 1, i32 2>, ptr [[P:%.*]], align 8
; LITTLE-NEXT: ret i64 8589934593
;
; BIG-LABEL: @vec_store_load_whole(
-; BIG-NEXT: [[P2:%.*]] = bitcast i32* [[P:%.*]] to <2 x i32>*
-; BIG-NEXT: store <2 x i32> <i32 1, i32 2>, <2 x i32>* [[P2]], align 8
+; BIG-NEXT: store <2 x i32> <i32 1, i32 2>, ptr [[P:%.*]], align 8
; BIG-NEXT: ret i64 4294967298
;
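; The whole <2 x i32> <1, 2> store is forwarded to the i64 load: element 0 sits
; at the lower address, so the result is (2 << 32) | 1 = 8589934593 on
; little-endian and (1 << 32) | 2 = 4294967298 on big-endian targets.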
- %p2 = bitcast i32* %p to <2 x i32>*
- store <2 x i32> <i32 1, i32 2>, <2 x i32>* %p2
- %p3 = bitcast i32* %p to i64*
- %load = load i64, i64* %p3
+ store <2 x i32> <i32 1, i32 2>, ptr %p
+ %load = load i64, ptr %p
ret i64 %load
}
-define i32 @vec_store_load_overlap(i32* %p) {
+define i32 @vec_store_load_overlap(ptr %p) {
; CHECK-LABEL: @vec_store_load_overlap(
-; CHECK-NEXT: [[P2:%.*]] = bitcast i32* [[P:%.*]] to <2 x i32>*
-; CHECK-NEXT: store <2 x i32> <i32 1, i32 2>, <2 x i32>* [[P2]], align 8
-; CHECK-NEXT: [[P3:%.*]] = bitcast i32* [[P]] to i8*
-; CHECK-NEXT: [[P4:%.*]] = getelementptr i8, i8* [[P3]], i64 2
-; CHECK-NEXT: [[P5:%.*]] = bitcast i8* [[P4]] to i32*
-; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[P5]], align 2
+; CHECK-NEXT: store <2 x i32> <i32 1, i32 2>, ptr [[P:%.*]], align 8
+; CHECK-NEXT: [[P4:%.*]] = getelementptr i8, ptr [[P]], i64 2
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[P4]], align 2
; CHECK-NEXT: ret i32 [[LOAD]]
;
- %p2 = bitcast i32* %p to <2 x i32>*
- store <2 x i32> <i32 1, i32 2>, <2 x i32>* %p2
- %p3 = bitcast i32* %p to i8*
- %p4 = getelementptr i8, i8* %p3, i64 2
- %p5 = bitcast i8* %p4 to i32*
- %load = load i32, i32* %p5, align 2
+ store <2 x i32> <i32 1, i32 2>, ptr %p
+ %p4 = getelementptr i8, ptr %p, i64 2
+ %load = load i32, ptr %p4, align 2
ret i32 %load
}
-define i32 @load_i32_store_nxv4i32(i32* %a) {
+define i32 @load_i32_store_nxv4i32(ptr %a) {
; CHECK-LABEL: @load_i32_store_nxv4i32(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[A:%.*]] to <vscale x 4 x i32>*
-; CHECK-NEXT: store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32>* [[TMP0]], align 16
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
+; CHECK-NEXT: store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), ptr [[A:%.*]], align 16
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4
; CHECK-NEXT: ret i32 [[TMP1]]
;
entry:
- %0 = bitcast i32* %a to <vscale x 4 x i32>*
- store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32>* %0, align 16
- %1 = load i32, i32* %a, align 4
- ret i32 %1
+ store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), ptr %a, align 16
+ %0 = load i32, ptr %a, align 4
+ ret i32 %0
}
-define i64 @load_i64_store_nxv8i8(i8* %a) {
+define i64 @load_i64_store_nxv8i8(ptr %a) {
; CHECK-LABEL: @load_i64_store_nxv8i8(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[A:%.*]] to <vscale x 8 x i8>*
-; CHECK-NEXT: store <vscale x 8 x i8> shufflevector (<vscale x 8 x i8> insertelement (<vscale x 8 x i8> poison, i8 1, i32 0), <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i8>* [[TMP0]], align 16
-; CHECK-NEXT: [[A2:%.*]] = bitcast i8* [[A]] to i64*
-; CHECK-NEXT: [[LOAD:%.*]] = load i64, i64* [[A2]], align 8
+; CHECK-NEXT: store <vscale x 8 x i8> shufflevector (<vscale x 8 x i8> insertelement (<vscale x 8 x i8> poison, i8 1, i32 0), <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer), ptr [[A:%.*]], align 16
+; CHECK-NEXT: [[LOAD:%.*]] = load i64, ptr [[A]], align 8
; CHECK-NEXT: ret i64 [[LOAD]]
;
entry:
- %0 = bitcast i8* %a to <vscale x 8 x i8>*
- store <vscale x 8 x i8> shufflevector (<vscale x 8 x i8> insertelement (<vscale x 8 x i8> poison, i8 1, i32 0), <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i8>* %0, align 16
- %a2 = bitcast i8* %a to i64*
- %load = load i64, i64* %a2, align 8
+ store <vscale x 8 x i8> shufflevector (<vscale x 8 x i8> insertelement (<vscale x 8 x i8> poison, i8 1, i32 0), <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer), ptr %a, align 16
+ %load = load i64, ptr %a, align 8
ret i64 %load
}
-define i64 @load_i64_store_nxv4i32(i32* %a) {
+define i64 @load_i64_store_nxv4i32(ptr %a) {
; CHECK-LABEL: @load_i64_store_nxv4i32(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[A:%.*]] to <vscale x 4 x i32>*
-; CHECK-NEXT: store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32>* [[TMP0]], align 16
-; CHECK-NEXT: [[A2:%.*]] = bitcast i32* [[A]] to i64*
-; CHECK-NEXT: [[LOAD:%.*]] = load i64, i64* [[A2]], align 8
+; CHECK-NEXT: store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), ptr [[A:%.*]], align 16
+; CHECK-NEXT: [[LOAD:%.*]] = load i64, ptr [[A]], align 8
; CHECK-NEXT: ret i64 [[LOAD]]
;
entry:
- %0 = bitcast i32* %a to <vscale x 4 x i32>*
- store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32>* %0, align 16
- %a2 = bitcast i32* %a to i64*
- %load = load i64, i64* %a2, align 8
+ store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), ptr %a, align 16
+ %load = load i64, ptr %a, align 8
ret i64 %load
}
-define i8 @load_i8_store_nxv4i32(i32* %a) {
+define i8 @load_i8_store_nxv4i32(ptr %a) {
; CHECK-LABEL: @load_i8_store_nxv4i32(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[A:%.*]] to <vscale x 4 x i32>*
-; CHECK-NEXT: store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32>* [[TMP0]], align 16
-; CHECK-NEXT: [[A2:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT: [[LOAD:%.*]] = load i8, i8* [[A2]], align 1
+; CHECK-NEXT: store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), ptr [[A:%.*]], align 16
+; CHECK-NEXT: [[LOAD:%.*]] = load i8, ptr [[A]], align 1
; CHECK-NEXT: ret i8 [[LOAD]]
;
entry:
- %0 = bitcast i32* %a to <vscale x 4 x i32>*
- store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32>* %0, align 16
- %a2 = bitcast i32* %a to i8*
- %load = load i8, i8* %a2, align 1
+ store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), ptr %a, align 16
+ %load = load i8, ptr %a, align 1
ret i8 %load
}
-define float @load_f32_store_nxv4f32(float* %a) {
+define float @load_f32_store_nxv4f32(ptr %a) {
; CHECK-LABEL: @load_f32_store_nxv4f32(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[A:%.*]] to <vscale x 4 x float>*
-; CHECK-NEXT: store <vscale x 4 x float> shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 1.000000e+00, i64 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x float>* [[TMP0]], align 16
-; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[A]], align 4
+; CHECK-NEXT: store <vscale x 4 x float> shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 1.000000e+00, i64 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer), ptr [[A:%.*]], align 16
+; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[A]], align 4
; CHECK-NEXT: ret float [[TMP1]]
;
entry:
- %0 = bitcast float* %a to <vscale x 4 x float>*
- store <vscale x 4 x float> shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 1.0, i64 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x float>* %0, align 16
- %1 = load float, float* %a, align 4
- ret float %1
+ store <vscale x 4 x float> shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 1.0, i64 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer), ptr %a, align 16
+ %0 = load float, ptr %a, align 4
+ ret float %0
}
-define i32 @load_i32_store_nxv4f32(float* %a) {
+define i32 @load_i32_store_nxv4f32(ptr %a) {
; CHECK-LABEL: @load_i32_store_nxv4f32(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[A:%.*]] to <vscale x 4 x float>*
-; CHECK-NEXT: store <vscale x 4 x float> shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 1.000000e+00, i64 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x float>* [[TMP0]], align 16
-; CHECK-NEXT: [[A2:%.*]] = bitcast float* [[A]] to i32*
-; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[A2]], align 4
+; CHECK-NEXT: store <vscale x 4 x float> shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 1.000000e+00, i64 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer), ptr [[A:%.*]], align 16
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[A]], align 4
; CHECK-NEXT: ret i32 [[LOAD]]
;
entry:
- %0 = bitcast float* %a to <vscale x 4 x float>*
- store <vscale x 4 x float> shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 1.0, i64 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x float>* %0, align 16
- %a2 = bitcast float* %a to i32*
- %load = load i32, i32* %a2, align 4
+ store <vscale x 4 x float> shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 1.0, i64 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer), ptr %a, align 16
+ %load = load i32, ptr %a, align 4
ret i32 %load
}
-define <4 x i32> @load_v4i32_store_nxv4i32(i32* %a) {
+define <4 x i32> @load_v4i32_store_nxv4i32(ptr %a) {
; CHECK-LABEL: @load_v4i32_store_nxv4i32(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[A:%.*]] to <vscale x 4 x i32>*
-; CHECK-NEXT: store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32>* [[TMP0]], align 16
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[A]] to <4 x i32>*
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 16
+; CHECK-NEXT: store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), ptr [[A:%.*]], align 16
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[A]], align 16
; CHECK-NEXT: ret <4 x i32> [[TMP2]]
;
entry:
- %0 = bitcast i32* %a to <vscale x 4 x i32>*
- store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32>* %0, align 16
- %1 = bitcast i32* %a to <4 x i32>*
- %2 = load <4 x i32>, <4 x i32>* %1, align 16
- ret <4 x i32> %2
+ store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), ptr %a, align 16
+ %0 = load <4 x i32>, ptr %a, align 16
+ ret <4 x i32> %0
}
-define <4 x i16> @load_v4i16_store_nxv4i32(i32* %a) {
+define <4 x i16> @load_v4i16_store_nxv4i32(ptr %a) {
; CHECK-LABEL: @load_v4i16_store_nxv4i32(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[A:%.*]] to <vscale x 4 x i32>*
-; CHECK-NEXT: store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32>* [[TMP0]], align 16
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[A]] to <4 x i16>*
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, <4 x i16>* [[TMP1]], align 16
+; CHECK-NEXT: store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), ptr [[A:%.*]], align 16
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr [[A]], align 16
; CHECK-NEXT: ret <4 x i16> [[TMP2]]
;
entry:
- %0 = bitcast i32* %a to <vscale x 4 x i32>*
- store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32>* %0, align 16
- %1 = bitcast i32* %a to <4 x i16>*
- %2 = load <4 x i16>, <4 x i16>* %1, align 16
- ret <4 x i16> %2
+ store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), ptr %a, align 16
+ %0 = load <4 x i16>, ptr %a, align 16
+ ret <4 x i16> %0
}
; Loaded data type exceeds the known minimum size of the store.
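; A <vscale x 4 x i8> store is only known to cover vscale x 4 >= 4 bytes, so the
; 8-byte i64 load may read past the stored region and must not be forwarded.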
-define i64 @load_i64_store_nxv4i8(i8* %a) {
+define i64 @load_i64_store_nxv4i8(ptr %a) {
; CHECK-LABEL: @load_i64_store_nxv4i8(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[A:%.*]] to <vscale x 4 x i8>*
-; CHECK-NEXT: store <vscale x 4 x i8> shufflevector (<vscale x 4 x i8> insertelement (<vscale x 4 x i8> poison, i8 1, i32 0), <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i8>* [[TMP0]], align 16
-; CHECK-NEXT: [[A2:%.*]] = bitcast i8* [[A]] to i64*
-; CHECK-NEXT: [[LOAD:%.*]] = load i64, i64* [[A2]], align 8
+; CHECK-NEXT: store <vscale x 4 x i8> shufflevector (<vscale x 4 x i8> insertelement (<vscale x 4 x i8> poison, i8 1, i32 0), <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer), ptr [[A:%.*]], align 16
+; CHECK-NEXT: [[LOAD:%.*]] = load i64, ptr [[A]], align 8
; CHECK-NEXT: ret i64 [[LOAD]]
;
entry:
- %0 = bitcast i8* %a to <vscale x 4 x i8>*
- store <vscale x 4 x i8> shufflevector (<vscale x 4 x i8> insertelement (<vscale x 4 x i8> poison, i8 1, i32 0), <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i8>* %0, align 16
- %a2 = bitcast i8* %a to i64*
- %load = load i64, i64* %a2, align 8
+ store <vscale x 4 x i8> shufflevector (<vscale x 4 x i8> insertelement (<vscale x 4 x i8> poison, i8 1, i32 0), <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer), ptr %a, align 16
+ %load = load i64, ptr %a, align 8
ret i64 %load
}
; Loaded data size is unknown - we cannot guarantee it won't
; exceed the store size.
-define <vscale x 4 x i8> @load_nxv4i8_store_nxv4i32(i32* %a) {
+define <vscale x 4 x i8> @load_nxv4i8_store_nxv4i32(ptr %a) {
; CHECK-LABEL: @load_nxv4i8_store_nxv4i32(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[A:%.*]] to <vscale x 4 x i32>*
-; CHECK-NEXT: store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32>* [[TMP0]], align 16
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[A]] to <vscale x 4 x i8>*
-; CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 4 x i8>, <vscale x 4 x i8>* [[TMP1]], align 16
+; CHECK-NEXT: store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), ptr [[A:%.*]], align 16
+; CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 4 x i8>, ptr [[A]], align 16
; CHECK-NEXT: ret <vscale x 4 x i8> [[TMP2]]
;
entry:
- %0 = bitcast i32* %a to <vscale x 4 x i32>*
- store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32>* %0, align 16
- %1 = bitcast i32* %a to <vscale x 4 x i8>*
- %2 = load <vscale x 4 x i8>, <vscale x 4 x i8>* %1, align 16
- ret <vscale x 4 x i8> %2
+ store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), ptr %a, align 16
+ %0 = load <vscale x 4 x i8>, ptr %a, align 16
+ ret <vscale x 4 x i8> %0
}
-define i8 @load_i8_store_i1(i1* %a) {
+define i8 @load_i8_store_i1(ptr %a) {
; CHECK-LABEL: @load_i8_store_i1(
-; CHECK-NEXT: store i1 true, i1* [[A:%.*]], align 1
-; CHECK-NEXT: [[A_I8:%.*]] = bitcast i1* [[A]] to i8*
-; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[A_I8]], align 1
+; CHECK-NEXT: store i1 true, ptr [[A:%.*]], align 1
+; CHECK-NEXT: [[V:%.*]] = load i8, ptr [[A]], align 1
; CHECK-NEXT: ret i8 [[V]]
;
- store i1 true, i1* %a
- %a.i8 = bitcast i1* %a to i8*
- %v = load i8, i8* %a.i8
+ store i1 true, ptr %a
+ %v = load i8, ptr %a
ret i8 %v
}
-define i1 @load_i1_store_i8(i8* %a) {
+define i1 @load_i1_store_i8(ptr %a) {
; CHECK-LABEL: @load_i1_store_i8(
-; CHECK-NEXT: store i8 1, i8* [[A:%.*]], align 1
+; CHECK-NEXT: store i8 1, ptr [[A:%.*]], align 1
; CHECK-NEXT: ret i1 true
;
- store i8 1, i8* %a
- %a.i1 = bitcast i8* %a to i1*
- %v = load i1, i1* %a.i1
+ store i8 1, ptr %a
+ %v = load i1, ptr %a
ret i1 %v
}
target datalayout = "e-m:e-p:64:64:64-i64:64-f80:128-n8:16:32:64-S128-ni:1"
-@X = constant i32 42 ; <i32*> [#uses=2]
-@X2 = constant i32 47 ; <i32*> [#uses=1]
-@Y = constant [2 x { i32, float }] [ { i32, float } { i32 12, float 1.000000e+00 }, { i32, float } { i32 37, float 0x3FF3B2FEC0000000 } ] ; <[2 x { i32, float }]*> [#uses=2]
-@Z = constant [2 x { i32, float }] zeroinitializer ; <[2 x { i32, float }]*> [#uses=1]
+@X = constant i32 42 ; <ptr> [#uses=2]
+@X2 = constant i32 47 ; <ptr> [#uses=1]
+@Y = constant [2 x { i32, float }] [ { i32, float } { i32 12, float 1.000000e+00 }, { i32, float } { i32 37, float 0x3FF3B2FEC0000000 } ] ; <ptr> [#uses=2]
+@Z = constant [2 x { i32, float }] zeroinitializer ; <ptr> [#uses=1]
@GLOBAL = internal constant [4 x i32] zeroinitializer
; CHECK-LABEL: @test1(
; CHECK-NEXT: ret i32 42
;
- %B = load i32, i32* @X ; <i32> [#uses=1]
+ %B = load i32, ptr @X ; <i32> [#uses=1]
ret i32 %B
}
; CHECK-LABEL: @test2(
; CHECK-NEXT: ret float 0x3FF3B2FEC0000000
;
- %A = getelementptr [2 x { i32, float }], [2 x { i32, float }]* @Y, i64 0, i64 1, i32 1 ; <float*> [#uses=1]
- %B = load float, float* %A ; <float> [#uses=1]
+ %A = getelementptr [2 x { i32, float }], ptr @Y, i64 0, i64 1, i32 1 ; <ptr> [#uses=1]
+ %B = load float, ptr %A ; <float> [#uses=1]
ret float %B
}
; CHECK-LABEL: @test3(
; CHECK-NEXT: ret i32 12
;
- %A = getelementptr [2 x { i32, float }], [2 x { i32, float }]* @Y, i64 0, i64 0, i32 0 ; <i32*> [#uses=1]
- %B = load i32, i32* %A ; <i32> [#uses=1]
+ %A = getelementptr [2 x { i32, float }], ptr @Y, i64 0, i64 0, i32 0 ; <ptr> [#uses=1]
+ %B = load i32, ptr %A ; <i32> [#uses=1]
ret i32 %B
}
; CHECK-LABEL: @test4(
; CHECK-NEXT: ret i32 0
;
- %A = getelementptr [2 x { i32, float }], [2 x { i32, float }]* @Z, i64 0, i64 1, i32 0 ; <i32*> [#uses=1]
- %B = load i32, i32* %A ; <i32> [#uses=1]
+ %A = getelementptr [2 x { i32, float }], ptr @Z, i64 0, i64 1, i32 0 ; <ptr> [#uses=1]
+ %B = load i32, ptr %A ; <i32> [#uses=1]
ret i32 %B
}
; CHECK-NEXT: [[Z:%.*]] = select i1 [[C:%.*]], i32 42, i32 47
; CHECK-NEXT: ret i32 [[Z]]
;
- %Y = select i1 %C, i32* @X, i32* @X2 ; <i32*> [#uses=1]
- %Z = load i32, i32* %Y ; <i32> [#uses=1]
+ %Y = select i1 %C, ptr @X, ptr @X2 ; <ptr> [#uses=1]
+ %Z = load i32, ptr %Y ; <i32> [#uses=1]
ret i32 %Z
}
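; A load through a (gep null) pointer is immediate UB, so instcombine replaces
; the body with its canonical unreachable marker: a poison store to null.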
define i32 @load_gep_null_inbounds(i64 %X) {
; CHECK-LABEL: @load_gep_null_inbounds(
-; CHECK-NEXT: store i32 poison, i32* null, align 4294967296
+; CHECK-NEXT: store i32 poison, ptr null, align 4294967296
; CHECK-NEXT: ret i32 poison
;
- %V = getelementptr inbounds i32, i32* null, i64 %X
- %R = load i32, i32* %V
+ %V = getelementptr inbounds i32, ptr null, i64 %X
+ %R = load i32, ptr %V
ret i32 %R
}
define i32 @load_gep_null_not_inbounds(i64 %X) {
; CHECK-LABEL: @load_gep_null_not_inbounds(
-; CHECK-NEXT: store i32 poison, i32* null, align 4294967296
+; CHECK-NEXT: store i32 poison, ptr null, align 4294967296
; CHECK-NEXT: ret i32 poison
;
- %V = getelementptr i32, i32* null, i64 %X
- %R = load i32, i32* %V
+ %V = getelementptr i32, ptr null, i64 %X
+ %R = load i32, ptr %V
ret i32 %R
}
define i32 @test7_no_null_opt(i32 %X) #0 {
; CHECK-LABEL: @test7_no_null_opt(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[X:%.*]] to i64
-; CHECK-NEXT: [[V:%.*]] = getelementptr i32, i32* null, i64 [[TMP1]]
-; CHECK-NEXT: [[R:%.*]] = load i32, i32* [[V]], align 4
+; CHECK-NEXT: [[V:%.*]] = getelementptr i32, ptr null, i64 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = load i32, ptr [[V]], align 4
; CHECK-NEXT: ret i32 [[R]]
;
- %V = getelementptr i32, i32* null, i32 %X ; <i32*> [#uses=1]
- %R = load i32, i32* %V ; <i32> [#uses=1]
+ %V = getelementptr i32, ptr null, i32 %X ; <ptr> [#uses=1]
+ %R = load i32, ptr %V ; <i32> [#uses=1]
ret i32 %R
}
attributes #0 = { null_pointer_is_valid }
-define i32 @test8(i32* %P) {
+define i32 @test8(ptr %P) {
; CHECK-LABEL: @test8(
-; CHECK-NEXT: store i32 1, i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 1, ptr [[P:%.*]], align 4
; CHECK-NEXT: ret i32 1
;
- store i32 1, i32* %P
- %X = load i32, i32* %P ; <i32> [#uses=1]
+ store i32 1, ptr %P
+ %X = load i32, ptr %P ; <i32> [#uses=1]
ret i32 %X
}
-define i32 @test9(i32* %P) {
+define i32 @test9(ptr %P) {
; CHECK-LABEL: @test9(
; CHECK-NEXT: ret i32 0
;
- %X = load i32, i32* %P ; <i32> [#uses=1]
- %Y = load i32, i32* %P ; <i32> [#uses=1]
+ %X = load i32, ptr %P ; <i32> [#uses=1]
+ %Y = load i32, ptr %P ; <i32> [#uses=1]
%Z = sub i32 %X, %Y ; <i32> [#uses=1]
ret i32 %Z
}
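; Both branches store 0 to %P (the T branch also stores 1 to %Q), so that store
; is sunk into the common successor and the final load folds to 0.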
-define i32 @test10(i1 %C.upgrd.1, i32* %P, i32* %Q) {
+define i32 @test10(i1 %C.upgrd.1, ptr %P, ptr %Q) {
; CHECK-LABEL: @test10(
; CHECK-NEXT: br i1 [[C_UPGRD_1:%.*]], label [[T:%.*]], label [[F:%.*]]
; CHECK: T:
-; CHECK-NEXT: store i32 1, i32* [[Q:%.*]], align 4
+; CHECK-NEXT: store i32 1, ptr [[Q:%.*]], align 4
; CHECK-NEXT: br label [[C:%.*]]
; CHECK: F:
; CHECK-NEXT: br label [[C]]
; CHECK: C:
-; CHECK-NEXT: store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 0, ptr [[P:%.*]], align 4
; CHECK-NEXT: ret i32 0
;
br i1 %C.upgrd.1, label %T, label %F
T: ; preds = %0
- store i32 1, i32* %Q
- store i32 0, i32* %P
+ store i32 1, ptr %Q
+ store i32 0, ptr %P
br label %C
F: ; preds = %0
- store i32 0, i32* %P
+ store i32 0, ptr %P
br label %C
C: ; preds = %F, %T
- %V = load i32, i32* %P ; <i32> [#uses=1]
+ %V = load i32, ptr %P ; <i32> [#uses=1]
ret i32 %V
}
-define double @test11(double* %p) {
+define double @test11(ptr %p) {
; CHECK-LABEL: @test11(
-; CHECK-NEXT: [[T0:%.*]] = getelementptr double, double* [[P:%.*]], i64 1
-; CHECK-NEXT: store double 2.000000e+00, double* [[T0]], align 8
+; CHECK-NEXT: [[T0:%.*]] = getelementptr double, ptr [[P:%.*]], i64 1
+; CHECK-NEXT: store double 2.000000e+00, ptr [[T0]], align 8
; CHECK-NEXT: ret double 2.000000e+00
;
- %t0 = getelementptr double, double* %p, i32 1
- store double 2.0, double* %t0
- %t1 = getelementptr double, double* %p, i32 1
- %x = load double, double* %t1
+ %t0 = getelementptr double, ptr %p, i32 1
+ store double 2.0, ptr %t0
+ %t1 = getelementptr double, ptr %p, i32 1
+ %x = load double, ptr %t1
ret double %x
}
-define i32 @test12(i32* %P) {
+define i32 @test12(ptr %P) {
; CHECK-LABEL: @test12(
; CHECK-NEXT: ret i32 123
;
%A = alloca i32
- store i32 123, i32* %A
+ store i32 123, ptr %A
; Cast the result of the load, not the source
- %Q = bitcast i32* %A to i32*
- %V = load i32, i32* %Q
+ %V = load i32, ptr %A
ret i32 %V
}
; CHECK-LABEL: @test13(
; CHECK-NEXT: ret <16 x i8> zeroinitializer
;
- %tmp = load <16 x i8>, <16 x i8>* bitcast ([4 x i32]* @GLOBAL to <16 x i8>*)
+ %tmp = load <16 x i8>, ptr @GLOBAL
ret <16 x i8> %tmp
}
define i8 @test14(i8 %x, i32 %y) {
; CHECK-LABEL: @test14(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[A_I8:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT: store i8 [[X:%.*]], i8* [[A_I8]], align 4
-; CHECK-NEXT: store i32 [[Y:%.*]], i32* [[A]], align 4
-; CHECK-NEXT: [[R:%.*]] = load i8, i8* [[A_I8]], align 4
+; CHECK-NEXT: store i8 [[X:%.*]], ptr [[A]], align 4
+; CHECK-NEXT: store i32 [[Y:%.*]], ptr [[A]], align 4
+; CHECK-NEXT: [[R:%.*]] = load i8, ptr [[A]], align 4
; CHECK-NEXT: ret i8 [[R]]
;
%a = alloca i32
- %a.i8 = bitcast i32* %a to i8*
- store i8 %x, i8* %a.i8
- store i32 %y, i32* %a
- %r = load i8, i8* %a.i8
+ store i8 %x, ptr %a
+ store i32 %y, ptr %a
+ %r = load i8, ptr %a
ret i8 %r
}
define i8 @test15(i8 %x, i32 %y) {
; CHECK-LABEL: @test15(
-; CHECK-NEXT: store i8 [[X:%.*]], i8* bitcast (i32* @test15_global to i8*), align 4
-; CHECK-NEXT: store i32 [[Y:%.*]], i32* @test15_global, align 4
-; CHECK-NEXT: [[R:%.*]] = load i8, i8* bitcast (i32* @test15_global to i8*), align 4
+; CHECK-NEXT: store i8 [[X:%.*]], ptr @test15_global, align 4
+; CHECK-NEXT: store i32 [[Y:%.*]], ptr @test15_global, align 4
+; CHECK-NEXT: [[R:%.*]] = load i8, ptr @test15_global, align 4
; CHECK-NEXT: ret i8 [[R]]
;
- %g.i8 = bitcast i32* @test15_global to i8*
- store i8 %x, i8* %g.i8
- store i32 %y, i32* @test15_global
- %r = load i8, i8* %g.i8
+ store i8 %x, ptr @test15_global
+ store i32 %y, ptr @test15_global
+ %r = load i8, ptr @test15_global
ret i8 %r
}
; Check that we canonicalize loads which are only stored to use integer types
; when there is a valid integer type.
-define void @test16(i8* %x, i8* %a, i8* %b, i8* %c) {
+define void @test16(ptr %x, ptr %a, ptr %b, ptr %c) {
; CHECK-LABEL: @test16(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[X_CAST:%.*]] = bitcast i8* [[X:%.*]] to float*
-; CHECK-NEXT: [[A_CAST:%.*]] = bitcast i8* [[A:%.*]] to float*
-; CHECK-NEXT: [[B_CAST:%.*]] = bitcast i8* [[B:%.*]] to float*
-; CHECK-NEXT: [[X1:%.*]] = load float, float* [[X_CAST]], align 4
-; CHECK-NEXT: store float [[X1]], float* [[A_CAST]], align 4
-; CHECK-NEXT: store float [[X1]], float* [[B_CAST]], align 4
-; CHECK-NEXT: [[X2:%.*]] = load float, float* [[X_CAST]], align 4
-; CHECK-NEXT: store float [[X2]], float* [[B_CAST]], align 4
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[C:%.*]] to float*
-; CHECK-NEXT: store float [[X2]], float* [[TMP0]], align 4
+; CHECK-NEXT: [[X1:%.*]] = load float, ptr [[X:%.*]], align 4
+; CHECK-NEXT: store float [[X1]], ptr [[A:%.*]], align 4
+; CHECK-NEXT: store float [[X1]], ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[X2:%.*]] = load float, ptr [[X]], align 4
+; CHECK-NEXT: store float [[X2]], ptr [[B]], align 4
+; CHECK-NEXT: store float [[X2]], ptr [[C:%.*]], align 4
; CHECK-NEXT: ret void
;
entry:
- %x.cast = bitcast i8* %x to float*
- %a.cast = bitcast i8* %a to float*
- %b.cast = bitcast i8* %b to float*
- %c.cast = bitcast i8* %c to i32*
- %x1 = load float, float* %x.cast
- store float %x1, float* %a.cast
- store float %x1, float* %b.cast
+ %x1 = load float, ptr %x
+ store float %x1, ptr %a
+ store float %x1, ptr %b
- %x2 = load float, float* %x.cast
- store float %x2, float* %b.cast
+ %x2 = load float, ptr %x
+ store float %x2, ptr %b
%x2.cast = bitcast float %x2 to i32
- store i32 %x2.cast, i32* %c.cast
+ store i32 %x2.cast, ptr %c
ret void
}
-define void @test16-vect(i8* %x, i8* %a, i8* %b, i8* %c) {
+define void @test16-vect(ptr %x, ptr %a, ptr %b, ptr %c) {
; CHECK-LABEL: @test16-vect(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[X_CAST:%.*]] = bitcast i8* [[X:%.*]] to <4 x i8>*
-; CHECK-NEXT: [[A_CAST:%.*]] = bitcast i8* [[A:%.*]] to <4 x i8>*
-; CHECK-NEXT: [[B_CAST:%.*]] = bitcast i8* [[B:%.*]] to <4 x i8>*
-; CHECK-NEXT: [[X1:%.*]] = load <4 x i8>, <4 x i8>* [[X_CAST]], align 4
-; CHECK-NEXT: store <4 x i8> [[X1]], <4 x i8>* [[A_CAST]], align 4
-; CHECK-NEXT: store <4 x i8> [[X1]], <4 x i8>* [[B_CAST]], align 4
-; CHECK-NEXT: [[X2:%.*]] = load <4 x i8>, <4 x i8>* [[X_CAST]], align 4
-; CHECK-NEXT: store <4 x i8> [[X2]], <4 x i8>* [[B_CAST]], align 4
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[C:%.*]] to <4 x i8>*
-; CHECK-NEXT: store <4 x i8> [[X2]], <4 x i8>* [[TMP0]], align 4
+; CHECK-NEXT: [[X1:%.*]] = load <4 x i8>, ptr [[X:%.*]], align 4
+; CHECK-NEXT: store <4 x i8> [[X1]], ptr [[A:%.*]], align 4
+; CHECK-NEXT: store <4 x i8> [[X1]], ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[X2:%.*]] = load <4 x i8>, ptr [[X]], align 4
+; CHECK-NEXT: store <4 x i8> [[X2]], ptr [[B]], align 4
+; CHECK-NEXT: store <4 x i8> [[X2]], ptr [[C:%.*]], align 4
; CHECK-NEXT: ret void
;
entry:
- %x.cast = bitcast i8* %x to <4 x i8>*
- %a.cast = bitcast i8* %a to <4 x i8>*
- %b.cast = bitcast i8* %b to <4 x i8>*
- %c.cast = bitcast i8* %c to i32*
- %x1 = load <4 x i8>, <4 x i8>* %x.cast
- store <4 x i8> %x1, <4 x i8>* %a.cast
- store <4 x i8> %x1, <4 x i8>* %b.cast
+ %x1 = load <4 x i8>, ptr %x
+ store <4 x i8> %x1, ptr %a
+ store <4 x i8> %x1, ptr %b
- %x2 = load <4 x i8>, <4 x i8>* %x.cast
- store <4 x i8> %x2, <4 x i8>* %b.cast
+ %x2 = load <4 x i8>, ptr %x
+ store <4 x i8> %x2, ptr %b
%x2.cast = bitcast <4 x i8> %x2 to i32
- store i32 %x2.cast, i32* %c.cast
+ store i32 %x2.cast, ptr %c
ret void
}
; Check that we don't try to rewrite a load when its only use is a store but it
; is used as the pointer to that store rather than the value.
-define void @test17(i8** %x, i8 %y) {
+define void @test17(ptr %x, i8 %y) {
; CHECK-LABEL: @test17(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[X_LOAD:%.*]] = load i8*, i8** [[X:%.*]], align 8
-; CHECK-NEXT: store i8 [[Y:%.*]], i8* [[X_LOAD]], align 1
+; CHECK-NEXT: [[X_LOAD:%.*]] = load ptr, ptr [[X:%.*]], align 8
+; CHECK-NEXT: store i8 [[Y:%.*]], ptr [[X_LOAD]], align 1
; CHECK-NEXT: ret void
;
entry:
- %x.load = load i8*, i8** %x
- store i8 %y, i8* %x.load
+ %x.load = load ptr, ptr %x
+ store i8 %y, ptr %x.load
ret void
}
; Check that we don't try to change the type of the load by inserting a bitcast,
; which would generate invalid IR.
%swift.error = type opaque
-declare void @useSwiftError(%swift.error** swifterror)
+declare void @useSwiftError(ptr swifterror)
-define void @test18(%swift.error** swifterror %err) {
+define void @test18(ptr swifterror %err) {
; CHECK-LABEL: @test18(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SWIFTERROR:%.*]] = alloca swifterror %swift.error*, align 8
-; CHECK-NEXT: store %swift.error* null, %swift.error** [[SWIFTERROR]], align 8
-; CHECK-NEXT: call void @useSwiftError(%swift.error** nonnull swifterror [[SWIFTERROR]])
-; CHECK-NEXT: [[ERR_RES:%.*]] = load %swift.error*, %swift.error** [[SWIFTERROR]], align 8
-; CHECK-NEXT: store %swift.error* [[ERR_RES]], %swift.error** [[ERR:%.*]], align 8
+; CHECK-NEXT: [[SWIFTERROR:%.*]] = alloca swifterror ptr, align 8
+; CHECK-NEXT: store ptr null, ptr [[SWIFTERROR]], align 8
+; CHECK-NEXT: call void @useSwiftError(ptr nonnull swifterror [[SWIFTERROR]])
+; CHECK-NEXT: [[ERR_RES:%.*]] = load ptr, ptr [[SWIFTERROR]], align 8
+; CHECK-NEXT: store ptr [[ERR_RES]], ptr [[ERR:%.*]], align 8
; CHECK-NEXT: ret void
;
entry:
- %swifterror = alloca swifterror %swift.error*, align 8
- store %swift.error* null, %swift.error** %swifterror, align 8
- call void @useSwiftError(%swift.error** nonnull swifterror %swifterror)
- %err.res = load %swift.error*, %swift.error** %swifterror, align 8
- store %swift.error* %err.res, %swift.error** %err, align 8
+ %swifterror = alloca swifterror ptr, align 8
+ store ptr null, ptr %swifterror, align 8
+ call void @useSwiftError(ptr nonnull swifterror %swifterror)
+ %err.res = load ptr, ptr %swifterror, align 8
+ store ptr %err.res, ptr %err, align 8
ret void
}
; Make sure we preserve the type of the store to a swifterror pointer.
-declare void @initi8(i8**)
-define void @test19(%swift.error** swifterror %err) {
+declare void @initi8(ptr)
+define void @test19(ptr swifterror %err) {
; CHECK-LABEL: @test19(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP:%.*]] = alloca i8*, align 8
-; CHECK-NEXT: call void @initi8(i8** nonnull [[TMP]])
-; CHECK-NEXT: [[SWIFTERROR:%.*]] = bitcast i8** [[TMP]] to %swift.error**
-; CHECK-NEXT: [[ERR_RES:%.*]] = load %swift.error*, %swift.error** [[SWIFTERROR]], align 8
-; CHECK-NEXT: store %swift.error* [[ERR_RES]], %swift.error** [[ERR:%.*]], align 8
+; CHECK-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: call void @initi8(ptr nonnull [[TMP]])
+; CHECK-NEXT: [[ERR_RES:%.*]] = load ptr, ptr [[TMP]], align 8
+; CHECK-NEXT: store ptr [[ERR_RES]], ptr [[ERR:%.*]], align 8
; CHECK-NEXT: ret void
;
entry:
- %tmp = alloca i8*, align 8
- call void @initi8(i8** %tmp)
- %swifterror = bitcast i8** %tmp to %swift.error**
- %err.res = load %swift.error*, %swift.error** %swifterror, align 8
- store %swift.error* %err.res, %swift.error** %err, align 8
+ %tmp = alloca ptr, align 8
+ call void @initi8(ptr %tmp)
+ %err.res = load ptr, ptr %tmp, align 8
+ store ptr %err.res, ptr %err, align 8
ret void
}
; Make sure we don't canonicalize accesses to scalable vectors.
-define void @test20(<vscale x 4 x i8>* %x, <vscale x 4 x i8>* %y) {
+define void @test20(ptr %x, ptr %y) {
; CHECK-LABEL: @test20(
-; CHECK-NEXT: [[X_LOAD:%.*]] = load <vscale x 4 x i8>, <vscale x 4 x i8>* [[X:%.*]], align 1
-; CHECK-NEXT: store <vscale x 4 x i8> [[X_LOAD]], <vscale x 4 x i8>* [[Y:%.*]], align 1
+; CHECK-NEXT: [[X_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[X:%.*]], align 1
+; CHECK-NEXT: store <vscale x 4 x i8> [[X_LOAD]], ptr [[Y:%.*]], align 1
; CHECK-NEXT: ret void
;
- %x.load = load <vscale x 4 x i8>, <vscale x 4 x i8>* %x, align 1
- store <vscale x 4 x i8> %x.load, <vscale x 4 x i8>* %y, align 1
+ %x.load = load <vscale x 4 x i8>, ptr %x, align 1
+ store <vscale x 4 x i8> %x.load, ptr %y, align 1
ret void
}
; Check that non-integral pointers are not converted using inttoptr
-declare void @use(i8*)
-declare void @use.p1(i8 addrspace(1)*)
+declare void @use(ptr)
+declare void @use.p1(ptr addrspace(1))
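; Per the "ni:1" in the datalayout above, addrspace(1) pointers are non-integral:
; @test21 can rebuild the pointer load from the i64 load with an inttoptr, but
; @test22 must keep the two loads separate.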
-define i64 @test21(i64* %P) {
+define i64 @test21(ptr %P) {
; CHECK-LABEL: @test21(
-; CHECK-NEXT: [[X:%.*]] = load i64, i64* [[P:%.*]], align 8
-; CHECK-NEXT: [[Y_CAST:%.*]] = inttoptr i64 [[X]] to i8*
-; CHECK-NEXT: call void @use(i8* [[Y_CAST]])
+; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[P:%.*]], align 8
+; CHECK-NEXT: [[Y_CAST:%.*]] = inttoptr i64 [[X]] to ptr
+; CHECK-NEXT: call void @use(ptr [[Y_CAST]])
; CHECK-NEXT: ret i64 [[X]]
;
- %P.ptr = bitcast i64* %P to i8**
- %X = load i64, i64* %P
- %Y = load i8*, i8** %P.ptr
- call void @use(i8* %Y)
+ %X = load i64, ptr %P
+ %Y = load ptr, ptr %P
+ call void @use(ptr %Y)
ret i64 %X
}
-define i64 @test22(i64* %P) {
+define i64 @test22(ptr %P) {
; CHECK-LABEL: @test22(
-; CHECK-NEXT: [[P_PTR:%.*]] = bitcast i64* [[P:%.*]] to i8 addrspace(1)**
-; CHECK-NEXT: [[X:%.*]] = load i64, i64* [[P]], align 8
-; CHECK-NEXT: [[Y:%.*]] = load i8 addrspace(1)*, i8 addrspace(1)** [[P_PTR]], align 8
-; CHECK-NEXT: call void @use.p1(i8 addrspace(1)* [[Y]])
+; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[P:%.*]], align 8
+; CHECK-NEXT: [[Y:%.*]] = load ptr addrspace(1), ptr [[P]], align 8
+; CHECK-NEXT: call void @use.p1(ptr addrspace(1) [[Y]])
; CHECK-NEXT: ret i64 [[X]]
;
- %P.ptr = bitcast i64* %P to i8 addrspace(1)**
- %X = load i64, i64* %P
- %Y = load i8 addrspace(1)*, i8 addrspace(1)** %P.ptr
- call void @use.p1(i8 addrspace(1)* %Y)
+ %X = load i64, ptr %P
+ %Y = load ptr addrspace(1), ptr %P
+ call void @use.p1(ptr addrspace(1) %Y)
ret i64 %X
}
-declare void @use.v2.p0(<2 x i8*>)
-declare void @use.v2.p1(<2 x i8 addrspace(1)*>)
+declare void @use.v2.p0(<2 x ptr>)
+declare void @use.v2.p1(<2 x ptr addrspace(1)>)
-define <2 x i64> @test23(<2 x i64>* %P) {
+define <2 x i64> @test23(ptr %P) {
; CHECK-LABEL: @test23(
-; CHECK-NEXT: [[P_PTR:%.*]] = bitcast <2 x i64>* [[P:%.*]] to <2 x i8*>*
-; CHECK-NEXT: [[X:%.*]] = load <2 x i64>, <2 x i64>* [[P]], align 16
-; CHECK-NEXT: [[Y:%.*]] = load <2 x i8*>, <2 x i8*>* [[P_PTR]], align 16
-; CHECK-NEXT: call void @use.v2.p0(<2 x i8*> [[Y]])
+; CHECK-NEXT: [[X:%.*]] = load <2 x i64>, ptr [[P:%.*]], align 16
+; CHECK-NEXT: [[Y:%.*]] = load <2 x ptr>, ptr [[P]], align 16
+; CHECK-NEXT: call void @use.v2.p0(<2 x ptr> [[Y]])
; CHECK-NEXT: ret <2 x i64> [[X]]
;
- %P.ptr = bitcast <2 x i64>* %P to <2 x i8*>*
- %X = load <2 x i64>, <2 x i64>* %P
- %Y = load <2 x i8*>, <2 x i8*>* %P.ptr
- call void @use.v2.p0(<2 x i8*> %Y)
+ %X = load <2 x i64>, ptr %P
+ %Y = load <2 x ptr>, ptr %P
+ call void @use.v2.p0(<2 x ptr> %Y)
ret <2 x i64> %X
}
-define <2 x i64> @test24(<2 x i64>* %P) {
+define <2 x i64> @test24(ptr %P) {
; CHECK-LABEL: @test24(
-; CHECK-NEXT: [[P_PTR:%.*]] = bitcast <2 x i64>* [[P:%.*]] to <2 x i8 addrspace(1)*>*
-; CHECK-NEXT: [[X:%.*]] = load <2 x i64>, <2 x i64>* [[P]], align 16
-; CHECK-NEXT: [[Y:%.*]] = load <2 x i8 addrspace(1)*>, <2 x i8 addrspace(1)*>* [[P_PTR]], align 16
-; CHECK-NEXT: call void @use.v2.p1(<2 x i8 addrspace(1)*> [[Y]])
+; CHECK-NEXT: [[X:%.*]] = load <2 x i64>, ptr [[P:%.*]], align 16
+; CHECK-NEXT: [[Y:%.*]] = load <2 x ptr addrspace(1)>, ptr [[P]], align 16
+; CHECK-NEXT: call void @use.v2.p1(<2 x ptr addrspace(1)> [[Y]])
; CHECK-NEXT: ret <2 x i64> [[X]]
;
- %P.ptr = bitcast <2 x i64>* %P to <2 x i8 addrspace(1)*>*
- %X = load <2 x i64>, <2 x i64>* %P
- %Y = load <2 x i8 addrspace(1)*>, <2 x i8 addrspace(1)*>* %P.ptr
- call void @use.v2.p1(<2 x i8 addrspace(1)*> %Y)
+ %X = load <2 x i64>, ptr %P
+ %Y = load <2 x ptr addrspace(1)>, ptr %P
+ call void @use.v2.p1(<2 x ptr addrspace(1)> %Y)
ret <2 x i64> %X
}
; Instcombine should be able to do trivial CSE of loads.
-define i32 @test1(i32* %p) {
- %t0 = getelementptr i32, i32* %p, i32 1
- %y = load i32, i32* %t0
- %t1 = getelementptr i32, i32* %p, i32 1
- %x = load i32, i32* %t1
+define i32 @test1(ptr %p) {
+ %t0 = getelementptr i32, ptr %p, i32 1
+ %y = load i32, ptr %t0
+ %t1 = getelementptr i32, ptr %p, i32 1
+ %x = load i32, ptr %t1
%a = sub i32 %y, %x
ret i32 %a
; CHECK-LABEL: @test1(
; PR7429
@.str = private constant [4 x i8] c"XYZ\00"
define float @test2() {
- %tmp = load float, float* bitcast ([4 x i8]* @.str to float*), align 1
+ %tmp = load float, ptr @.str, align 1
ret float %tmp
; CHECK-LABEL: @test2(
; PR14986
define void @test3() nounwind {
; This is a weird way of computing zero.
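; Assuming 32-bit pointers, the byte offset is 29826161*144 + 28*4 = 4294967296,
; which wraps to exactly 0, so both GEPs address element 0 of their arrays.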
- %l = load i32, i32* getelementptr ([36 x i32], [36 x i32]* @expect32, i32 29826161, i32 28), align 4
- store i32 %l, i32* getelementptr ([36 x i32], [36 x i32]* @rslts32, i32 29826161, i32 28), align 4
+ %l = load i32, ptr getelementptr ([36 x i32], ptr @expect32, i32 29826161, i32 28), align 4
+ store i32 %l, ptr getelementptr ([36 x i32], ptr @rslts32, i32 29826161, i32 28), align 4
ret void
; CHECK-LABEL: @test3(
-; CHECK: store i32 1, i32* getelementptr inbounds ([36 x i32], [36 x i32]* @rslts32, i32 0, i32 0)
+; CHECK: store i32 1, ptr @rslts32
}
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
; CHECK-LABEL: @test_load_combine_aa(
-; CHECK: %[[V:.*]] = load i32, i32* %0
-; CHECK: store i32 0, i32* %3
-; CHECK: store i32 %[[V]], i32* %1
-; CHECK: store i32 %[[V]], i32* %2
-define void @test_load_combine_aa(i32*, i32*, i32*, i32* noalias) {
- %a = load i32, i32* %0
- store i32 0, i32* %3
- %b = load i32, i32* %0
- store i32 %a, i32* %1
- store i32 %b, i32* %2
+; CHECK: %[[V:.*]] = load i32, ptr %0
+; CHECK: store i32 0, ptr %3
+; CHECK: store i32 %[[V]], ptr %1
+; CHECK: store i32 %[[V]], ptr %2
+define void @test_load_combine_aa(ptr, ptr, ptr, ptr noalias) {
+ %a = load i32, ptr %0
+ store i32 0, ptr %3
+ %b = load i32, ptr %0
+ store i32 %a, ptr %1
+ store i32 %b, ptr %2
ret void
}
define <2 x i64> @static_hem() {
; CHECK-LABEL: @static_hem(
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* getelementptr (<2 x i64>, <2 x i64>* @x, i64 7), align 16
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (<2 x i64>, ptr @x, i64 7), align 16
; CHECK-NEXT: ret <2 x i64> [[TMP1]]
;
- %t = getelementptr <2 x i64>, <2 x i64>* @x, i32 7
- %tmp1 = load <2 x i64>, <2 x i64>* %t, align 1
+ %t = getelementptr <2 x i64>, ptr @x, i32 7
+ %tmp1 = load <2 x i64>, ptr %t, align 1
ret <2 x i64> %tmp1
}
define <2 x i64> @hem(i32 %i) {
; CHECK-LABEL: @hem(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[I:%.*]] to i64
-; CHECK-NEXT: [[T:%.*]] = getelementptr <2 x i64>, <2 x i64>* @x, i64 [[TMP1]]
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* [[T]], align 16
+; CHECK-NEXT: [[T:%.*]] = getelementptr <2 x i64>, ptr @x, i64 [[TMP1]]
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr [[T]], align 16
; CHECK-NEXT: ret <2 x i64> [[TMP1]]
;
- %t = getelementptr <2 x i64>, <2 x i64>* @x, i32 %i
- %tmp1 = load <2 x i64>, <2 x i64>* %t, align 1
+ %t = getelementptr <2 x i64>, ptr @x, i32 %i
+ %tmp1 = load <2 x i64>, ptr %t, align 1
ret <2 x i64> %tmp1
}
; CHECK-LABEL: @hem_2d(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[I:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = sext i32 [[J:%.*]] to i64
-; CHECK-NEXT: [[T:%.*]] = getelementptr [13 x <2 x i64>], [13 x <2 x i64>]* @xx, i64 [[TMP1]], i64 [[TMP2]]
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* [[T]], align 16
+; CHECK-NEXT: [[T:%.*]] = getelementptr [13 x <2 x i64>], ptr @xx, i64 [[TMP1]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr [[T]], align 16
; CHECK-NEXT: ret <2 x i64> [[TMP1]]
;
- %t = getelementptr [13 x <2 x i64>], [13 x <2 x i64>]* @xx, i32 %i, i32 %j
- %tmp1 = load <2 x i64>, <2 x i64>* %t, align 1
+ %t = getelementptr [13 x <2 x i64>], ptr @xx, i32 %i, i32 %j
+ %tmp1 = load <2 x i64>, ptr %t, align 1
ret <2 x i64> %tmp1
}
define <2 x i64> @foo() {
; CHECK-LABEL: @foo(
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* @x, align 16
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @x, align 16
; CHECK-NEXT: ret <2 x i64> [[TMP1]]
;
- %tmp1 = load <2 x i64>, <2 x i64>* @x, align 1
+ %tmp1 = load <2 x i64>, ptr @x, align 1
ret <2 x i64> %tmp1
}
define <2 x i64> @bar() {
; CHECK-LABEL: @bar(
; CHECK-NEXT: [[T:%.*]] = alloca <2 x i64>, align 16
-; CHECK-NEXT: call void @kip(<2 x i64>* nonnull [[T]])
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* [[T]], align 16
+; CHECK-NEXT: call void @kip(ptr nonnull [[T]])
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr [[T]], align 16
; CHECK-NEXT: ret <2 x i64> [[TMP1]]
;
%t = alloca <2 x i64>
- call void @kip(<2 x i64>* %t)
- %tmp1 = load <2 x i64>, <2 x i64>* %t, align 1
+ call void @kip(ptr %t)
+ %tmp1 = load <2 x i64>, ptr %t, align 1
ret <2 x i64> %tmp1
}
define void @static_hem_store(<2 x i64> %y) {
; CHECK-LABEL: @static_hem_store(
-; CHECK-NEXT: store <2 x i64> [[Y:%.*]], <2 x i64>* getelementptr (<2 x i64>, <2 x i64>* @x, i64 7), align 16
+; CHECK-NEXT: store <2 x i64> [[Y:%.*]], ptr getelementptr (<2 x i64>, ptr @x, i64 7), align 16
; CHECK-NEXT: ret void
;
- %t = getelementptr <2 x i64>, <2 x i64>* @x, i32 7
- store <2 x i64> %y, <2 x i64>* %t, align 1
+ %t = getelementptr <2 x i64>, ptr @x, i32 7
+ store <2 x i64> %y, ptr %t, align 1
ret void
}
define void @hem_store(i32 %i, <2 x i64> %y) {
; CHECK-LABEL: @hem_store(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[I:%.*]] to i64
-; CHECK-NEXT: [[T:%.*]] = getelementptr <2 x i64>, <2 x i64>* @x, i64 [[TMP1]]
-; CHECK-NEXT: store <2 x i64> [[Y:%.*]], <2 x i64>* [[T]], align 16
+; CHECK-NEXT: [[T:%.*]] = getelementptr <2 x i64>, ptr @x, i64 [[TMP1]]
+; CHECK-NEXT: store <2 x i64> [[Y:%.*]], ptr [[T]], align 16
; CHECK-NEXT: ret void
;
- %t = getelementptr <2 x i64>, <2 x i64>* @x, i32 %i
- store <2 x i64> %y, <2 x i64>* %t, align 1
+ %t = getelementptr <2 x i64>, ptr @x, i32 %i
+ store <2 x i64> %y, ptr %t, align 1
ret void
}
define void @hem_2d_store(i32 %i, i32 %j, <2 x i64> %y) {
; CHECK-LABEL: @hem_2d_store(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[I:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = sext i32 [[J:%.*]] to i64
-; CHECK-NEXT: [[T:%.*]] = getelementptr [13 x <2 x i64>], [13 x <2 x i64>]* @xx, i64 [[TMP1]], i64 [[TMP2]]
-; CHECK-NEXT: store <2 x i64> [[Y:%.*]], <2 x i64>* [[T]], align 16
+; CHECK-NEXT: [[T:%.*]] = getelementptr [13 x <2 x i64>], ptr @xx, i64 [[TMP1]], i64 [[TMP2]]
+; CHECK-NEXT: store <2 x i64> [[Y:%.*]], ptr [[T]], align 16
; CHECK-NEXT: ret void
;
- %t = getelementptr [13 x <2 x i64>], [13 x <2 x i64>]* @xx, i32 %i, i32 %j
- store <2 x i64> %y, <2 x i64>* %t, align 1
+ %t = getelementptr [13 x <2 x i64>], ptr @xx, i32 %i, i32 %j
+ store <2 x i64> %y, ptr %t, align 1
ret void
}
define void @foo_store(<2 x i64> %y) {
; CHECK-LABEL: @foo_store(
-; CHECK-NEXT: store <2 x i64> [[Y:%.*]], <2 x i64>* @x, align 16
+; CHECK-NEXT: store <2 x i64> [[Y:%.*]], ptr @x, align 16
; CHECK-NEXT: ret void
;
- store <2 x i64> %y, <2 x i64>* @x, align 1
+ store <2 x i64> %y, ptr @x, align 1
ret void
}
define void @bar_store(<2 x i64> %y) {
; CHECK-LABEL: @bar_store(
; CHECK-NEXT: [[T:%.*]] = alloca <2 x i64>, align 16
-; CHECK-NEXT: call void @kip(<2 x i64>* nonnull [[T]])
-; CHECK-NEXT: store <2 x i64> [[Y:%.*]], <2 x i64>* [[T]], align 16
+; CHECK-NEXT: call void @kip(ptr nonnull [[T]])
+; CHECK-NEXT: store <2 x i64> [[Y:%.*]], ptr [[T]], align 16
; CHECK-NEXT: ret void
;
%t = alloca <2 x i64>
- call void @kip(<2 x i64>* %t)
- store <2 x i64> %y, <2 x i64>* %t, align 1
+ call void @kip(ptr %t)
+ store <2 x i64> %y, ptr %t, align 1
ret void
}
-declare void @kip(<2 x i64>* %t)
+declare void @kip(ptr %t)
target datalayout = "e-m:e-p:64:64:64-i64:64-f80:128-n8:16:32:64-S128"
-define i32 @test_load_cast_combine_tbaa(float* %ptr) {
+define i32 @test_load_cast_combine_tbaa(ptr %ptr) {
; Ensure (cast (load (...))) -> (load (cast (...))) preserves TBAA.
; CHECK-LABEL: @test_load_cast_combine_tbaa(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[PTR:%.*]] to i32*
-; CHECK-NEXT: [[L1:%.*]] = load i32, i32* [[TMP0]], align 4, !tbaa [[TBAA0:![0-9]+]]
+; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !tbaa [[TBAA0:![0-9]+]]
; CHECK-NEXT: ret i32 [[L1]]
;
entry:
- %l = load float, float* %ptr, !tbaa !0
+ %l = load float, ptr %ptr, !tbaa !0
%c = bitcast float %l to i32
ret i32 %c
}
-define i32 @test_load_cast_combine_noalias(float* %ptr) {
+define i32 @test_load_cast_combine_noalias(ptr %ptr) {
; Ensure (cast (load (...))) -> (load (cast (...))) preserves no-alias metadata.
; CHECK-LABEL: @test_load_cast_combine_noalias(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[PTR:%.*]] to i32*
-; CHECK-NEXT: [[L1:%.*]] = load i32, i32* [[TMP0]], align 4, !alias.scope !3, !noalias !3
+; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !alias.scope !3, !noalias !3
; CHECK-NEXT: ret i32 [[L1]]
;
entry:
- %l = load float, float* %ptr, !alias.scope !3, !noalias !3
+ %l = load float, ptr %ptr, !alias.scope !3, !noalias !3
%c = bitcast float %l to i32
ret i32 %c
}
-define float @test_load_cast_combine_range(i32* %ptr) {
+define float @test_load_cast_combine_range(ptr %ptr) {
; Ensure (cast (load (...))) -> (load (cast (...))) drops range metadata. It
; would be nice to preserve or update it somehow but this is hard when moving
; between types.
; CHECK-LABEL: @test_load_cast_combine_range(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[PTR:%.*]] to float*
-; CHECK-NEXT: [[L1:%.*]] = load float, float* [[TMP0]], align 4
+; CHECK-NEXT: [[L1:%.*]] = load float, ptr [[PTR:%.*]], align 4
; CHECK-NEXT: ret float [[L1]]
;
entry:
- %l = load i32, i32* %ptr, !range !6
+ %l = load i32, ptr %ptr, !range !6
%c = bitcast i32 %l to float
ret float %c
}
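; For example, a !range such as !{i32 0, i32 42} constrains the loaded
; integer value, but the same bit patterns reinterpreted as float do not form
; a contiguous float range, so the metadata cannot be carried over soundly.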
-define i32 @test_load_cast_combine_invariant(float* %ptr) {
+define i32 @test_load_cast_combine_invariant(ptr %ptr) {
; Ensure (cast (load (...))) -> (load (cast (...))) preserves invariant metadata.
; CHECK-LABEL: @test_load_cast_combine_invariant(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[PTR:%.*]] to i32*
-; CHECK-NEXT: [[L1:%.*]] = load i32, i32* [[TMP0]], align 4, !invariant.load !6
+; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !invariant.load !6
; CHECK-NEXT: ret i32 [[L1]]
;
entry:
- %l = load float, float* %ptr, !invariant.load !7
+ %l = load float, ptr %ptr, !invariant.load !7
%c = bitcast float %l to i32
ret i32 %c
}
-define i32 @test_load_cast_combine_nontemporal(float* %ptr) {
+define i32 @test_load_cast_combine_nontemporal(ptr %ptr) {
; Ensure (cast (load (...))) -> (load (cast (...))) preserves nontemporal
; metadata.
; CHECK-LABEL: @test_load_cast_combine_nontemporal(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[PTR:%.*]] to i32*
-; CHECK-NEXT: [[L1:%.*]] = load i32, i32* [[TMP0]], align 4, !nontemporal !7
+; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !nontemporal !7
; CHECK-NEXT: ret i32 [[L1]]
;
entry:
- %l = load float, float* %ptr, !nontemporal !8
+ %l = load float, ptr %ptr, !nontemporal !8
%c = bitcast float %l to i32
ret i32 %c
}
-define i8* @test_load_cast_combine_align(i32** %ptr) {
+define ptr @test_load_cast_combine_align(ptr %ptr) {
; Ensure (cast (load (...))) -> (load (cast (...))) preserves align
; metadata.
; CHECK-LABEL: @test_load_cast_combine_align(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32** [[PTR:%.*]] to i8**
-; CHECK-NEXT: [[L1:%.*]] = load i8*, i8** [[TMP0]], align 8, !align !8
-; CHECK-NEXT: ret i8* [[L1]]
+; CHECK-NEXT: [[L1:%.*]] = load ptr, ptr [[PTR:%.*]], align 8, !align !8
+; CHECK-NEXT: ret ptr [[L1]]
;
entry:
- %l = load i32*, i32** %ptr, !align !9
- %c = bitcast i32* %l to i8*
- ret i8* %c
+ %l = load ptr, ptr %ptr, !align !9
+ ret ptr %l
}
-define i8* @test_load_cast_combine_deref(i32** %ptr) {
+define ptr @test_load_cast_combine_deref(ptr %ptr) {
; Ensure (cast (load (...))) -> (load (cast (...))) preserves dereferenceable
; metadata.
; CHECK-LABEL: @test_load_cast_combine_deref(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32** [[PTR:%.*]] to i8**
-; CHECK-NEXT: [[L1:%.*]] = load i8*, i8** [[TMP0]], align 8, !dereferenceable !8
-; CHECK-NEXT: ret i8* [[L1]]
+; CHECK-NEXT: [[L1:%.*]] = load ptr, ptr [[PTR:%.*]], align 8, !dereferenceable !8
+; CHECK-NEXT: ret ptr [[L1]]
;
entry:
- %l = load i32*, i32** %ptr, !dereferenceable !9
- %c = bitcast i32* %l to i8*
- ret i8* %c
+ %l = load ptr, ptr %ptr, !dereferenceable !9
+ ret ptr %l
}
-define i8* @test_load_cast_combine_deref_or_null(i32** %ptr) {
+define ptr @test_load_cast_combine_deref_or_null(ptr %ptr) {
; Ensure (cast (load (...))) -> (load (cast (...))) preserves
; dereferenceable_or_null metadata.
; CHECK-LABEL: @test_load_cast_combine_deref_or_null(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32** [[PTR:%.*]] to i8**
-; CHECK-NEXT: [[L1:%.*]] = load i8*, i8** [[TMP0]], align 8, !dereferenceable_or_null !8
-; CHECK-NEXT: ret i8* [[L1]]
+; CHECK-NEXT: [[L1:%.*]] = load ptr, ptr [[PTR:%.*]], align 8, !dereferenceable_or_null !8
+; CHECK-NEXT: ret ptr [[L1]]
;
entry:
- %l = load i32*, i32** %ptr, !dereferenceable_or_null !9
- %c = bitcast i32* %l to i8*
- ret i8* %c
+ %l = load ptr, ptr %ptr, !dereferenceable_or_null !9
+ ret ptr %l
}
-define void @test_load_cast_combine_loop(float* %src, i32* %dst, i32 %n) {
+define void @test_load_cast_combine_loop(ptr %src, ptr %dst, i32 %n) {
; Ensure (cast (load (...))) -> (load (cast (...))) preserves loop access
; metadata.
; CHECK-LABEL: @test_load_cast_combine_loop(
; CHECK: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[I:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[I_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[I]] to i64
-; CHECK-NEXT: [[SRC_GEP:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 [[TMP0]]
+; CHECK-NEXT: [[SRC_GEP:%.*]] = getelementptr inbounds float, ptr [[SRC:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[I]] to i64
-; CHECK-NEXT: [[DST_GEP:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 [[TMP1]]
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[SRC_GEP]] to i32*
-; CHECK-NEXT: [[L1:%.*]] = load i32, i32* [[TMP2]], align 4, !llvm.access.group !9
-; CHECK-NEXT: store i32 [[L1]], i32* [[DST_GEP]], align 4
+; CHECK-NEXT: [[DST_GEP:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[SRC_GEP]], align 4, !llvm.access.group !9
+; CHECK-NEXT: store i32 [[L1]], ptr [[DST_GEP]], align 4
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[I_NEXT]], [[N:%.*]]
; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]], !llvm.loop [[LOOP1:![0-9]+]]
entry:
  br label %loop
loop:
%i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
- %src.gep = getelementptr inbounds float, float* %src, i32 %i
- %dst.gep = getelementptr inbounds i32, i32* %dst, i32 %i
- %l = load float, float* %src.gep, !llvm.access.group !10
+ %src.gep = getelementptr inbounds float, ptr %src, i32 %i
+ %dst.gep = getelementptr inbounds i32, ptr %dst, i32 %i
+ %l = load float, ptr %src.gep, !llvm.access.group !10
%c = bitcast float %l to i32
- store i32 %c, i32* %dst.gep
+ store i32 %c, ptr %dst.gep
%i.next = add i32 %i, 1
%cmp = icmp slt i32 %i.next, %n
br i1 %cmp, label %loop, label %exit, !llvm.loop !1
exit:
  ret void
}
-define void @test_load_cast_combine_nonnull(float** %ptr) {
+define void @test_load_cast_combine_nonnull(ptr %ptr) {
; CHECK-LABEL: @test_load_cast_combine_nonnull(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[P:%.*]] = load float*, float** [[PTR:%.*]], align 8, !nonnull !10
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr float*, float** [[PTR]], i64 42
-; CHECK-NEXT: store float* [[P]], float** [[GEP]], align 8
+; CHECK-NEXT: [[P:%.*]] = load ptr, ptr [[PTR:%.*]], align 8, !nonnull !10
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr ptr, ptr [[PTR]], i64 42
+; CHECK-NEXT: store ptr [[P]], ptr [[GEP]], align 8
; CHECK-NEXT: ret void
;
entry:
- %p = load float*, float** %ptr, !nonnull !6
- %gep = getelementptr float*, float** %ptr, i32 42
- store float* %p, float** %gep
+ %p = load ptr, ptr %ptr, !nonnull !6
+ %gep = getelementptr ptr, ptr %ptr, i32 42
+ store ptr %p, ptr %gep
ret void
}
ret double %log
}
-define float @function_pointer(float ()* %fptr, float %p1) {
+define float @function_pointer(ptr %fptr, float %p1) {
; CHECK-LABEL: @function_pointer(
; CHECK-NEXT: [[PTR:%.*]] = call float [[FPTR:%.*]]()
; CHECK-NEXT: [[LOG:%.*]] = call float @logf(float [[PTR]])
ret double %log
}
-define double @pr43617(double %d, i32 %i, double (i32)* %f) {
+define double @pr43617(double %d, i32 %i, ptr %f) {
entry:
%sub = fsub double -0.000000e+00, %d
%icall = tail call fast double %f(i32 %i)
entry:
%retval = alloca i32, align 4
%d1 = alloca i32, align 4
- store i32 0, i32* %retval, align 4
- %0 = bitcast i32* %d1 to i8*, !dbg !17
- call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #4, !dbg !17
+ store i32 0, ptr %retval, align 4
+ call void @llvm.lifetime.start.p0(i64 4, ptr %d1) #4, !dbg !17
; CHECK: dbg.value(metadata i32 42, metadata [[METADATA_IDX1:![0-9]+]], metadata !DIExpression())
; CHECK-NEXT: store
- call void @llvm.dbg.declare(metadata i32* %d1, metadata !16, metadata !DIExpression()), !dbg !17
- store i32 42, i32* %d1, align 4, !dbg !17
+ call void @llvm.dbg.declare(metadata ptr %d1, metadata !16, metadata !DIExpression()), !dbg !17
+ store i32 42, ptr %d1, align 4, !dbg !17
br label %while.cond, !dbg !22
while.cond: ; preds = %while.body, %entry
-; CHECK: dbg.value(metadata i32 %1, metadata [[METADATA_IDX1]], metadata !DIExpression())
+; CHECK: dbg.value(metadata i32 %0, metadata [[METADATA_IDX1]], metadata !DIExpression())
; CHECK-NEXT: call zeroext i1 @_ZL5emptyi
- %1 = load i32, i32* %d1, align 4, !dbg !22
- %call = call zeroext i1 @_ZL5emptyi(i32 %1), !dbg !22
+ %0 = load i32, ptr %d1, align 4, !dbg !22
+ %call = call zeroext i1 @_ZL5emptyi(i32 %0), !dbg !22
%lnot = xor i1 %call, true, !dbg !22
br i1 %lnot, label %while.body, label %while.end, !dbg !22
while.body: ; preds = %while.cond
-; CHECK: dbg.value(metadata i32* %d1, metadata [[METADATA_IDX1]], metadata !DIExpression(DW_OP_deref))
+; CHECK: dbg.value(metadata ptr %d1, metadata [[METADATA_IDX1]], metadata !DIExpression(DW_OP_deref))
; CHECK-NEXT: call void @_ZL6escapeRi
- call void @_ZL6escapeRi(i32* dereferenceable(4) %d1), !dbg !23
+ call void @_ZL6escapeRi(ptr dereferenceable(4) %d1), !dbg !23
br label %while.cond, !dbg !22, !llvm.loop !24
while.end: ; preds = %while.cond
- %2 = bitcast i32* %d1 to i8*, !dbg !25
- call void @llvm.lifetime.end.p0i8(i64 4, i8* %2) #4, !dbg !25
+ call void @llvm.lifetime.end.p0(i64 4, ptr %d1) #4, !dbg !25
ret i32 0, !dbg !26
}
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
declare void @llvm.dbg.declare(metadata, metadata, metadata)
ret i1 false
}
-define internal void @_ZL6escapeRi(i32* dereferenceable(4) %c) #3 !dbg !34 {
+define internal void @_ZL6escapeRi(ptr dereferenceable(4) %c) #3 !dbg !34 {
ret void
}
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
!llvm.dbg.cu = !{!2}
!llvm.module.flags = !{!8, !9, !10}
; Extra use
; Fold happened
-define i1 @scalar_lshr_and_negC_eq_extra_use_lshr(i32 %x, i32 %y, i32 %z, i32* %p) {
+define i1 @scalar_lshr_and_negC_eq_extra_use_lshr(i32 %x, i32 %y, i32 %z, ptr %p) {
; CHECK-LABEL: @scalar_lshr_and_negC_eq_extra_use_lshr(
; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[LSHR]], [[Z:%.*]]
-; CHECK-NEXT: store i32 [[XOR]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[XOR]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = icmp ult i32 [[LSHR]], 8
; CHECK-NEXT: ret i1 [[R]]
;
%lshr = lshr i32 %x, %y
%xor = xor i32 %lshr, %z ; extra use of lshr
- store i32 %xor, i32* %p
+ store i32 %xor, ptr %p
%and = and i32 %lshr, 4294967288 ; ~7
%r = icmp eq i32 %and, 0
ret i1 %r
}
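; The masked compare folds because (X & -8) == 0 holds exactly when X u< 8,
; so the icmp can use the lshr result directly and the `and` disappears even
; though the lshr itself has an extra use.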
; Not folded
-define i1 @scalar_lshr_and_negC_eq_extra_use_and(i32 %x, i32 %y, i32 %z, i32* %p) {
+define i1 @scalar_lshr_and_negC_eq_extra_use_and(i32 %x, i32 %y, i32 %z, ptr %p) {
; CHECK-LABEL: @scalar_lshr_and_negC_eq_extra_use_and(
; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[LSHR]], -8
; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[AND]], [[Z:%.*]]
-; CHECK-NEXT: store i32 [[MUL]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[MUL]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%lshr = lshr i32 %x, %y
%and = and i32 %lshr, 4294967288 ; ~7
%mul = mul i32 %and, %z ; extra use of and
- store i32 %mul, i32* %p
+ store i32 %mul, ptr %p
%r = icmp eq i32 %and, 0
ret i1 %r
}
; Not folded
-define i1 @scalar_lshr_and_negC_eq_extra_use_lshr_and(i32 %x, i32 %y, i32 %z, i32* %p, i32* %q) {
+define i1 @scalar_lshr_and_negC_eq_extra_use_lshr_and(i32 %x, i32 %y, i32 %z, ptr %p, ptr %q) {
; CHECK-LABEL: @scalar_lshr_and_negC_eq_extra_use_lshr_and(
; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[LSHR]], -8
-; CHECK-NEXT: store i32 [[AND]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[AND]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[LSHR]], [[Z:%.*]]
-; CHECK-NEXT: store i32 [[ADD]], i32* [[Q:%.*]], align 4
+; CHECK-NEXT: store i32 [[ADD]], ptr [[Q:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%lshr = lshr i32 %x, %y
%and = and i32 %lshr, 4294967288 ; ~7
- store i32 %and, i32* %p ; extra use of and
+ store i32 %and, ptr %p ; extra use of and
%add = add i32 %lshr, %z ; extra use of lshr
- store i32 %add, i32* %q
+ store i32 %add, ptr %q
%r = icmp eq i32 %and, 0
ret i1 %r
}
; Extra use
; Fold happened
-define i1 @scalar_lshr_and_signbit_eq_extra_use_lshr(i32 %x, i32 %y, i32 %z, i32* %p) {
+define i1 @scalar_lshr_and_signbit_eq_extra_use_lshr(i32 %x, i32 %y, i32 %z, ptr %p) {
; CHECK-LABEL: @scalar_lshr_and_signbit_eq_extra_use_lshr(
; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[LSHR]], [[Z:%.*]]
-; CHECK-NEXT: store i32 [[XOR]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[XOR]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = icmp sgt i32 [[LSHR]], -1
; CHECK-NEXT: ret i1 [[R]]
;
%lshr = lshr i32 %x, %y
%xor = xor i32 %lshr, %z ; extra use of lshr
- store i32 %xor, i32* %p
+ store i32 %xor, ptr %p
%and = and i32 %lshr, 2147483648
%r = icmp eq i32 %and, 0
ret i1 %r
}
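; Same idea for the sign bit: (X & 0x80000000) == 0 holds exactly when X is
; non-negative, so the compare becomes `icmp sgt X, -1` on the lshr result.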
; Not folded
-define i1 @scalar_lshr_and_signbit_eq_extra_use_and(i32 %x, i32 %y, i32 %z, i32* %p) {
+define i1 @scalar_lshr_and_signbit_eq_extra_use_and(i32 %x, i32 %y, i32 %z, ptr %p) {
; CHECK-LABEL: @scalar_lshr_and_signbit_eq_extra_use_and(
; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[LSHR]], -2147483648
; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[AND]], [[Z:%.*]]
-; CHECK-NEXT: store i32 [[MUL]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[MUL]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%lshr = lshr i32 %x, %y
%and = and i32 %lshr, 2147483648
%mul = mul i32 %and, %z ; extra use of and
- store i32 %mul, i32* %p
+ store i32 %mul, ptr %p
%r = icmp eq i32 %and, 0
ret i1 %r
}
; Not folded
-define i1 @scalar_lshr_and_signbit_eq_extra_use_lshr_and(i32 %x, i32 %y, i32 %z, i32* %p, i32* %q) {
+define i1 @scalar_lshr_and_signbit_eq_extra_use_lshr_and(i32 %x, i32 %y, i32 %z, ptr %p, ptr %q) {
; CHECK-LABEL: @scalar_lshr_and_signbit_eq_extra_use_lshr_and(
; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[LSHR]], -2147483648
-; CHECK-NEXT: store i32 [[AND]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[AND]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[LSHR]], [[Z:%.*]]
-; CHECK-NEXT: store i32 [[ADD]], i32* [[Q:%.*]], align 4
+; CHECK-NEXT: store i32 [[ADD]], ptr [[Q:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%lshr = lshr i32 %x, %y
%and = and i32 %lshr, 2147483648
- store i32 %and, i32* %p ; extra use of and
+ store i32 %and, ptr %p ; extra use of and
%add = add i32 %lshr, %z ; extra use of lshr
- store i32 %add, i32* %q
+ store i32 %add, ptr %q
%r = icmp eq i32 %and, 0
ret i1 %r
}
; The lshr can be dropped because any bits in the operand which might be
; non-zero will be shifted off the end.
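; Concretely: %k.04 is always either 0 or the result of an `and` with 16383,
; so it fits in 14 bits and `lshr i32 %k.04, 14` is provably zero; the add
; feeding the xor then folds away and the shl can be tagged nuw/nsw, as the
; CHECK lines below show.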
-define i32 @hash_string(i8* nocapture %key) nounwind readonly {
+define i32 @hash_string(ptr nocapture %key) nounwind readonly {
; CHECK-LABEL: @hash_string(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[T0:%.*]] = load i8, i8* [[KEY:%.*]], align 1
+; CHECK-NEXT: [[T0:%.*]] = load i8, ptr [[KEY:%.*]], align 1
; CHECK-NEXT: [[T1:%.*]] = icmp eq i8 [[T0]], 0
; CHECK-NEXT: br i1 [[T1]], label [[BB2:%.*]], label [[BB:%.*]]
; CHECK: bb:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[T:%.*]], [[BB]] ]
; CHECK-NEXT: [[K_04:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[T8:%.*]], [[BB]] ]
-; CHECK-NEXT: [[CP_05:%.*]] = getelementptr i8, i8* [[KEY]], i64 [[INDVAR]]
+; CHECK-NEXT: [[CP_05:%.*]] = getelementptr i8, ptr [[KEY]], i64 [[INDVAR]]
; CHECK-NEXT: [[T2:%.*]] = shl nuw nsw i32 [[K_04]], 1
-; CHECK-NEXT: [[T5:%.*]] = load i8, i8* [[CP_05]], align 1
+; CHECK-NEXT: [[T5:%.*]] = load i8, ptr [[CP_05]], align 1
; CHECK-NEXT: [[T6:%.*]] = sext i8 [[T5]] to i32
; CHECK-NEXT: [[T7:%.*]] = xor i32 [[T2]], [[T6]]
; CHECK-NEXT: [[T8]] = and i32 [[T7]], 16383
; CHECK-NEXT: [[T]] = add i64 [[INDVAR]], 1
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[KEY]], i64 [[T]]
-; CHECK-NEXT: [[T9:%.*]] = load i8, i8* [[SCEVGEP]], align 1
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[KEY]], i64 [[T]]
+; CHECK-NEXT: [[T9:%.*]] = load i8, ptr [[SCEVGEP]], align 1
; CHECK-NEXT: [[T10:%.*]] = icmp eq i8 [[T9]], 0
; CHECK-NEXT: br i1 [[T10]], label [[BB2]], label [[BB]]
; CHECK: bb2:
; CHECK-NEXT: [[K_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[T8]], [[BB]] ]
; CHECK-NEXT: ret i32 [[K_0_LCSSA]]
;
entry:
- %t0 = load i8, i8* %key, align 1
+ %t0 = load i8, ptr %key, align 1
%t1 = icmp eq i8 %t0, 0
br i1 %t1, label %bb2, label %bb
bb:
%indvar = phi i64 [ 0, %entry ], [ %t, %bb ]
%k.04 = phi i32 [ 0, %entry ], [ %t8, %bb ]
- %cp.05 = getelementptr i8, i8* %key, i64 %indvar
+ %cp.05 = getelementptr i8, ptr %key, i64 %indvar
%t2 = shl i32 %k.04, 1
%t3 = lshr i32 %k.04, 14
%t4 = add i32 %t2, %t3
- %t5 = load i8, i8* %cp.05, align 1
+ %t5 = load i8, ptr %cp.05, align 1
%t6 = sext i8 %t5 to i32
%t7 = xor i32 %t6, %t4
%t8 = and i32 %t7, 16383
%t = add i64 %indvar, 1
- %scevgep = getelementptr i8, i8* %key, i64 %t
- %t9 = load i8, i8* %scevgep, align 1
+ %scevgep = getelementptr i8, ptr %key, i64 %t
+ %t9 = load i8, ptr %scevgep, align 1
%t10 = icmp eq i8 %t9, 0
  br i1 %t10, label %bb2, label %bb
bb2:
  %k.0.lcssa = phi i32 [ 0, %entry ], [ %t8, %bb ]
  ret i32 %k.0.lcssa
}
; Negative test - though it may still be worth trying to avoid the srem.
-define i32 @negative_and_odd_uses(i32 %x, i32* %p) {
+define i32 @negative_and_odd_uses(i32 %x, ptr %p) {
; CHECK-LABEL: @negative_and_odd_uses(
; CHECK-NEXT: [[S:%.*]] = srem i32 [[X:%.*]], 2
-; CHECK-NEXT: store i32 [[S]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[S]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = lshr i32 [[S]], 31
; CHECK-NEXT: ret i32 [[R]]
;
%s = srem i32 %x, 2
- store i32 %s, i32* %p
+ store i32 %s, ptr %p
%r = lshr i32 %s, 31
ret i32 %r
}
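; Rationale: `srem i32 %x, 2` is only negative when %x is negative and odd,
; so `lshr i32 %s, 31` extracts exactly that predicate; the extra store of %s
; keeps the srem alive here, which is why no rewrite happens.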
; RUN: opt < %s -debugify-each -passes=instcombine -S > %t.ll
; RUN: diff %t.no_dbg.ll %t.ll
-declare void @free(i8*)
+declare void @free(ptr)
-define void @test12(i32* %foo) minsize {
+define void @test12(ptr %foo) minsize {
entry:
- %tobool = icmp eq i32* %foo, null
+ %tobool = icmp eq ptr %foo, null
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
- %bitcast = bitcast i32* %foo to i8*
- tail call void @free(i8* %bitcast)
+ tail call void @free(ptr %foo)
br label %if.end
if.end: ; preds = %entry, %if.then
  ret void
}
define dso_local i32 @_Z6answeri(i32 %0) {
; CHECK-LABEL: @_Z6answeri(
-; CHECK-NEXT: [[TMP2:%.*]] = call noalias nonnull dereferenceable(80) i8* @_Znam(i64 80) #[[ATTR2:[0-9]+]]
-; CHECK-NEXT: call void @free(i8* [[TMP2]])
+; CHECK-NEXT: [[TMP2:%.*]] = call noalias nonnull dereferenceable(80) ptr @_Znam(i64 80) #[[ATTR2:[0-9]+]]
+; CHECK-NEXT: call void @free(ptr [[TMP2]])
; CHECK-NEXT: ret i32 42
;
- %2 = call noalias nonnull i8* @_Znam(i64 80) #0
- call void @free(i8* %2)
+ %2 = call noalias nonnull ptr @_Znam(i64 80) #0
+ call void @free(ptr %2)
ret i32 42
}
; Check that a free of an alloca does not crash the compiler when optimizing it.
define void @test_alloca() {
%1 = alloca i8
- call void @free(i8* %1)
+ call void @free(ptr %1)
ret void
}
; Function Attrs: nobuiltin allocsize(0)
-declare dso_local nonnull i8* @_Znam(i64) #1
+declare dso_local nonnull ptr @_Znam(i64) #1
; Function Attrs: nounwind
-declare dso_local void @free(i8*) allockind("free") "alloc-family"="malloc"
+declare dso_local void @free(ptr) allockind("free") "alloc-family"="malloc"
attributes #0 = { builtin allocsize(0) }
attributes #1 = { nobuiltin allocsize(0) allockind("alloc,uninitialized") "alloc-family"="_Znam" }
target datalayout = "p:32:32:32"
-define i32 @main(i32 %argc, i8** %argv) {
+define i32 @main(i32 %argc, ptr %argv) {
; CHECK-LABEL: @main(
; CHECK-NEXT: ret i32 0
;
- %c_19 = alloca i8*
- %malloc_206 = tail call i8* @malloc(i32 mul (i32 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i32), i32 10))
- store i8* %malloc_206, i8** %c_19
- %tmp_207 = load i8*, i8** %c_19
- tail call void @free(i8* %tmp_207)
+ %c_19 = alloca ptr
+ %malloc_206 = tail call ptr @malloc(i32 mul (i32 ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i32), i32 10))
+ store ptr %malloc_206, ptr %c_19
+ %tmp_207 = load ptr, ptr %c_19
+ tail call void @free(ptr %tmp_207)
ret i32 0
}
define i32 @dead_aligned_alloc(i32 %alignment, i32 %size, i8 %value) {
; CHECK-LABEL: @dead_aligned_alloc(
; CHECK-NEXT: ret i32 0
;
- %aligned_allocation = tail call i8* @aligned_alloc(i32 %alignment, i32 %size)
- store i8 %value, i8* %aligned_allocation
- tail call void @free(i8* %aligned_allocation)
+ %aligned_allocation = tail call ptr @aligned_alloc(i32 %alignment, i32 %size)
+ store i8 %value, ptr %aligned_allocation
+ tail call void @free(ptr %aligned_allocation)
ret i32 0
}
-declare noalias i8* @calloc(i32, i32) nounwind allockind("alloc,zeroed") allocsize(0,1) "alloc-family"="malloc"
-declare noalias i8* @malloc(i32) allockind("alloc,uninitialized") allocsize(0) "alloc-family"="malloc"
-declare noalias i8* @aligned_alloc(i32, i32) allockind("alloc,uninitialized,aligned") allocsize(1) "alloc-family"="malloc"
-declare void @free(i8*) allockind("free") "alloc-family"="malloc"
+declare noalias ptr @calloc(i32, i32) nounwind allockind("alloc,zeroed") allocsize(0,1) "alloc-family"="malloc"
+declare noalias ptr @malloc(i32) allockind("alloc,uninitialized") allocsize(0) "alloc-family"="malloc"
+declare noalias ptr @aligned_alloc(i32, i32) allockind("alloc,uninitialized,aligned") allocsize(1) "alloc-family"="malloc"
+declare void @free(ptr) allockind("free") "alloc-family"="malloc"
define i1 @foo() {
; CHECK-LABEL: @foo(
; CHECK-NEXT: ret i1 false
;
- %m = call i8* @malloc(i32 1)
- %z = icmp eq i8* %m, null
- call void @free(i8* %m)
+ %m = call ptr @malloc(i32 1)
+ %z = icmp eq ptr %m, null
+ call void @free(ptr %m)
ret i1 %z
}
-declare void @llvm.lifetime.start.p0i8(i64, i8*)
-declare void @llvm.lifetime.end.p0i8(i64, i8*)
-declare i64 @llvm.objectsize.i64(i8*, i1)
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
-declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
-declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i1) nounwind
+declare void @llvm.lifetime.start.p0(i64, ptr)
+declare void @llvm.lifetime.end.p0(i64, ptr)
+declare i64 @llvm.objectsize.i64(ptr, i1)
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
+declare void @llvm.memmove.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
+declare void @llvm.memset.p0.i32(ptr, i8, i32, i1) nounwind
-define void @test3(i8* %src) {
+define void @test3(ptr %src) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: ret void
;
- %a = call noalias i8* @malloc(i32 10)
- call void @llvm.lifetime.start.p0i8(i64 10, i8* %a)
- call void @llvm.lifetime.end.p0i8(i64 10, i8* %a)
- %size = call i64 @llvm.objectsize.i64(i8* %a, i1 true)
- store i8 42, i8* %a
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a, i8* %src, i32 32, i1 false)
- call void @llvm.memmove.p0i8.p0i8.i32(i8* %a, i8* %src, i32 32, i1 false)
- call void @llvm.memset.p0i8.i32(i8* %a, i8 5, i32 32, i1 false)
- %alloc2 = call noalias i8* @calloc(i32 5, i32 7) nounwind
- %z = icmp ne i8* %alloc2, null
+ %a = call noalias ptr @malloc(i32 10)
+ call void @llvm.lifetime.start.p0(i64 10, ptr %a)
+ call void @llvm.lifetime.end.p0(i64 10, ptr %a)
+ %size = call i64 @llvm.objectsize.i64(ptr %a, i1 true)
+ store i8 42, ptr %a
+ call void @llvm.memcpy.p0.p0.i32(ptr %a, ptr %src, i32 32, i1 false)
+ call void @llvm.memmove.p0.p0.i32(ptr %a, ptr %src, i32 32, i1 false)
+ call void @llvm.memset.p0.i32(ptr %a, i8 5, i32 32, i1 false)
+ %alloc2 = call noalias ptr @calloc(i32 5, i32 7) nounwind
+ %z = icmp ne ptr %alloc2, null
ret void
}
define void @test4() {
; CHECK-LABEL: @test4(
; CHECK-NEXT: ret void
;
- %A = call i8* @malloc(i32 16000)
- %B = bitcast i8* %A to double*
- %C = bitcast double* %B to i8*
- call void @free(i8* %C)
+ %A = call ptr @malloc(i32 16000)
+ call void @free(ptr %A)
ret void
}
-define void @test5(i8* %ptr, i8** %esc) {
+define void @test5(ptr %ptr, ptr %esc) {
; CHECK-LABEL: @test5(
-; CHECK-NEXT: [[A:%.*]] = call dereferenceable_or_null(700) i8* @malloc(i32 700)
-; CHECK-NEXT: [[B:%.*]] = call dereferenceable_or_null(700) i8* @malloc(i32 700)
-; CHECK-NEXT: [[C:%.*]] = call dereferenceable_or_null(700) i8* @malloc(i32 700)
-; CHECK-NEXT: [[D:%.*]] = call dereferenceable_or_null(700) i8* @malloc(i32 700)
-; CHECK-NEXT: [[E:%.*]] = call dereferenceable_or_null(700) i8* @malloc(i32 700)
-; CHECK-NEXT: [[F:%.*]] = call dereferenceable_or_null(700) i8* @malloc(i32 700)
-; CHECK-NEXT: [[G:%.*]] = call dereferenceable_or_null(700) i8* @malloc(i32 700)
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* noundef nonnull align 1 dereferenceable(32) [[PTR:%.*]], i8* noundef nonnull align 1 dereferenceable(32) [[A]], i32 32, i1 false)
-; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i32(i8* noundef nonnull align 1 dereferenceable(32) [[PTR]], i8* noundef nonnull align 1 dereferenceable(32) [[B]], i32 32, i1 false)
-; CHECK-NEXT: store i8* [[C]], i8** [[ESC:%.*]], align 4
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[D]], i8* [[PTR]], i32 32, i1 true)
-; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i32(i8* [[E]], i8* [[PTR]], i32 32, i1 true)
-; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* [[F]], i8 5, i32 32, i1 true)
-; CHECK-NEXT: store volatile i8 4, i8* [[G]], align 1
+; CHECK-NEXT: [[A:%.*]] = call dereferenceable_or_null(700) ptr @malloc(i32 700)
+; CHECK-NEXT: [[B:%.*]] = call dereferenceable_or_null(700) ptr @malloc(i32 700)
+; CHECK-NEXT: [[C:%.*]] = call dereferenceable_or_null(700) ptr @malloc(i32 700)
+; CHECK-NEXT: [[D:%.*]] = call dereferenceable_or_null(700) ptr @malloc(i32 700)
+; CHECK-NEXT: [[E:%.*]] = call dereferenceable_or_null(700) ptr @malloc(i32 700)
+; CHECK-NEXT: [[F:%.*]] = call dereferenceable_or_null(700) ptr @malloc(i32 700)
+; CHECK-NEXT: [[G:%.*]] = call dereferenceable_or_null(700) ptr @malloc(i32 700)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr noundef nonnull align 1 dereferenceable(32) [[PTR:%.*]], ptr noundef nonnull align 1 dereferenceable(32) [[A]], i32 32, i1 false)
+; CHECK-NEXT: call void @llvm.memmove.p0.p0.i32(ptr noundef nonnull align 1 dereferenceable(32) [[PTR]], ptr noundef nonnull align 1 dereferenceable(32) [[B]], i32 32, i1 false)
+; CHECK-NEXT: store ptr [[C]], ptr [[ESC:%.*]], align 4
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr [[D]], ptr [[PTR]], i32 32, i1 true)
+; CHECK-NEXT: call void @llvm.memmove.p0.p0.i32(ptr [[E]], ptr [[PTR]], i32 32, i1 true)
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr [[F]], i8 5, i32 32, i1 true)
+; CHECK-NEXT: store volatile i8 4, ptr [[G]], align 1
; CHECK-NEXT: ret void
;
- %a = call i8* @malloc(i32 700)
- %b = call i8* @malloc(i32 700)
- %c = call i8* @malloc(i32 700)
- %d = call i8* @malloc(i32 700)
- %e = call i8* @malloc(i32 700)
- %f = call i8* @malloc(i32 700)
- %g = call i8* @malloc(i32 700)
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %a, i32 32, i1 false)
- call void @llvm.memmove.p0i8.p0i8.i32(i8* %ptr, i8* %b, i32 32, i1 false)
- store i8* %c, i8** %esc
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d, i8* %ptr, i32 32, i1 true)
- call void @llvm.memmove.p0i8.p0i8.i32(i8* %e, i8* %ptr, i32 32, i1 true)
- call void @llvm.memset.p0i8.i32(i8* %f, i8 5, i32 32, i1 true)
- store volatile i8 4, i8* %g
+ %a = call ptr @malloc(i32 700)
+ %b = call ptr @malloc(i32 700)
+ %c = call ptr @malloc(i32 700)
+ %d = call ptr @malloc(i32 700)
+ %e = call ptr @malloc(i32 700)
+ %f = call ptr @malloc(i32 700)
+ %g = call ptr @malloc(i32 700)
+ call void @llvm.memcpy.p0.p0.i32(ptr %ptr, ptr %a, i32 32, i1 false)
+ call void @llvm.memmove.p0.p0.i32(ptr %ptr, ptr %b, i32 32, i1 false)
+ store ptr %c, ptr %esc
+ call void @llvm.memcpy.p0.p0.i32(ptr %d, ptr %ptr, i32 32, i1 true)
+ call void @llvm.memmove.p0.p0.i32(ptr %e, ptr %ptr, i32 32, i1 true)
+ call void @llvm.memset.p0.i32(ptr %f, i8 5, i32 32, i1 true)
+ store volatile i8 4, ptr %g
ret void
}
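; None of the seven allocations in @test5 can be removed: %a and %b are read
; as copy sources, %c escapes through %esc, and %d-%g are touched by volatile
; operations, so instcombine only annotates the malloc calls with
; dereferenceable_or_null derived from the known allocation size.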
;; Using simplifycfg will remove the empty basic block and the branch,
;; and a subsequent dead code elimination will remove the comparison.
;; This is what happens at -O1 and above.
-define void @test6(i8* %foo) minsize {
+define void @test6(ptr %foo) minsize {
; CHECK-LABEL: @test6(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8* [[FOO:%.*]], null
-; CHECK-NEXT: tail call void @free(i8* [[FOO]])
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq ptr [[FOO:%.*]], null
+; CHECK-NEXT: tail call void @free(ptr [[FOO]])
; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
; CHECK: if.then:
; CHECK-NEXT: br label [[IF_END]]
;; Call to free moved
;; Block is now empty and may be simplified by simplifycfg
entry:
- %tobool = icmp eq i8* %foo, null
+ %tobool = icmp eq ptr %foo, null
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
- tail call void @free(i8* %foo)
+ tail call void @free(ptr %foo)
br label %if.end
if.end: ; preds = %entry, %if.then
  ret void
}
;; Check that the optimization that moves a call to free into its predecessor
;; block (see test6) also happens when noop casts are involved.
-define void @test12(i32* %foo) minsize {
+define void @test12(ptr %foo) minsize {
; CHECK-LABEL: @test12(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32* [[FOO:%.*]], null
-; CHECK-NEXT: [[BITCAST:%.*]] = bitcast i32* [[FOO]] to i8*
-; CHECK-NEXT: tail call void @free(i8* [[BITCAST]])
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq ptr [[FOO:%.*]], null
+; CHECK-NEXT: tail call void @free(ptr [[FOO]])
; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
; CHECK: if.then:
; CHECK-NEXT: br label [[IF_END]]
;; Call to free moved
;; Block is now empty and may be simplified by simplifycfg
entry:
- %tobool = icmp eq i32* %foo, null
+ %tobool = icmp eq ptr %foo, null
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
- %bitcast = bitcast i32* %foo to i8*
- tail call void @free(i8* %bitcast)
+ tail call void @free(ptr %foo)
br label %if.end
if.end: ; preds = %entry, %if.then
  ret void
}
;; Test that nonnull-implying attributes on the parameter are adjusted when the
;; call is moved, since they may no longer be valid and result in miscompiles if
;; kept unchanged.
-define void @test_nonnull_free_move(i8* %foo) minsize {
+define void @test_nonnull_free_move(ptr %foo) minsize {
; CHECK-LABEL: @test_nonnull_free_move(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8* [[FOO:%.*]], null
-; CHECK-NEXT: tail call void @free(i8* [[FOO]])
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq ptr [[FOO:%.*]], null
+; CHECK-NEXT: tail call void @free(ptr [[FOO]])
; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
; CHECK: if.then:
; CHECK-NEXT: br label [[IF_END]]
; CHECK-NEXT: ret void
;
entry:
- %tobool = icmp eq i8* %foo, null
+ %tobool = icmp eq ptr %foo, null
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
- tail call void @free(i8* nonnull %foo)
+ tail call void @free(ptr nonnull %foo)
br label %if.end
if.end: ; preds = %entry, %if.then
ret void
}
-define void @test_dereferenceable_free_move(i8* %foo) minsize {
+define void @test_dereferenceable_free_move(ptr %foo) minsize {
; CHECK-LABEL: @test_dereferenceable_free_move(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8* [[FOO:%.*]], null
-; CHECK-NEXT: tail call void @free(i8* dereferenceable_or_null(4) [[FOO]])
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq ptr [[FOO:%.*]], null
+; CHECK-NEXT: tail call void @free(ptr dereferenceable_or_null(4) [[FOO]])
; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
; CHECK: if.then:
; CHECK-NEXT: br label [[IF_END]]
; CHECK-NEXT: ret void
;
entry:
- %tobool = icmp eq i8* %foo, null
+ %tobool = icmp eq ptr %foo, null
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
- tail call void @free(i8* dereferenceable(4) %foo)
+ tail call void @free(ptr dereferenceable(4) %foo)
br label %if.end
if.end: ; preds = %entry, %if.then
ret void
}
-define void @test_nonnull_dereferenceable_free_move(i8* %foo) minsize {
+define void @test_nonnull_dereferenceable_free_move(ptr %foo) minsize {
; CHECK-LABEL: @test_nonnull_dereferenceable_free_move(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8* [[FOO:%.*]], null
-; CHECK-NEXT: tail call void @free(i8* dereferenceable_or_null(16) [[FOO]])
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq ptr [[FOO:%.*]], null
+; CHECK-NEXT: tail call void @free(ptr dereferenceable_or_null(16) [[FOO]])
; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
; CHECK: if.then:
; CHECK-NEXT: br label [[IF_END]]
; CHECK-NEXT: ret void
;
entry:
- %tobool = icmp eq i8* %foo, null
+ %tobool = icmp eq ptr %foo, null
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
- tail call void @free(i8* nonnull dereferenceable(16) %foo)
+ tail call void @free(ptr nonnull dereferenceable(16) %foo)
br label %if.end
if.end: ; preds = %entry, %if.then
  ret void
}
; This transform has been reverted once due to difficult-to-isolate fallout.
; TODO: Freeing a no-free pointer -> %foo must be null
-define void @test13(i8* nofree %foo) {
+define void @test13(ptr nofree %foo) {
; CHECK-LABEL: @test13(
-; CHECK-NEXT: call void @free(i8* [[FOO:%.*]])
+; CHECK-NEXT: call void @free(ptr [[FOO:%.*]])
; CHECK-NEXT: ret void
;
- call void @free(i8* %foo)
+ call void @free(ptr %foo)
ret void
}
; TODO: Freeing a no-free pointer -> %foo must be null
-define void @test14(i8* %foo) nofree {
+define void @test14(ptr %foo) nofree {
; CHECK-LABEL: @test14(
-; CHECK-NEXT: call void @free(i8* [[FOO:%.*]])
+; CHECK-NEXT: call void @free(ptr [[FOO:%.*]])
; CHECK-NEXT: ret void
;
- call void @free(i8* %foo)
+ call void @free(ptr %foo)
ret void
}
; TODO: free call marked no-free -> %foo must be null
-define void @test15(i8* %foo) {
+define void @test15(ptr %foo) {
; CHECK-LABEL: @test15(
-; CHECK-NEXT: call void @free(i8* [[FOO:%.*]]) #[[ATTR5:[0-9]+]]
+; CHECK-NEXT: call void @free(ptr [[FOO:%.*]]) #[[ATTR5:[0-9]+]]
; CHECK-NEXT: ret void
;
- call void @free(i8* %foo) nofree
+ call void @free(ptr %foo) nofree
ret void
}
; TODO: freeing a nonnull nofree pointer -> full UB
-define void @test16(i8* nonnull nofree %foo) {
+define void @test16(ptr nonnull nofree %foo) {
; CHECK-LABEL: @test16(
-; CHECK-NEXT: call void @free(i8* [[FOO:%.*]])
+; CHECK-NEXT: call void @free(ptr [[FOO:%.*]])
; CHECK-NEXT: ret void
;
- call void @free(i8* %foo)
+ call void @free(ptr %foo)
ret void
}
target triple = "nvptx64"
-declare void @user(i8*)
-declare i8* @malloc(i64) allockind("alloc,uninitialized") "alloc-family"="malloc" allocsize(0)
-declare void @free(i8*) allockind("free") "alloc-family"="malloc"
+declare void @user(ptr)
+declare ptr @malloc(i64) allockind("alloc,uninitialized") "alloc-family"="malloc" allocsize(0)
+declare void @free(ptr) allockind("free") "alloc-family"="malloc"
; Ensure the nvptx backend states that malloc & free exist so we can
; recognize and optimize them properly. In the test below the malloc-free
; chain is dead and is removed; in the test after it the pointer escapes to
; @user, so both calls must stay.
define void @malloc_then_free_not_needed() {
; CHECK-LABEL: @malloc_then_free_not_needed(
; CHECK-NEXT: ret void
;
- %a = call i8* @malloc(i64 4)
- store i8 0, i8* %a
- call void @free(i8* %a)
+ %a = call ptr @malloc(i64 4)
+ store i8 0, ptr %a
+ call void @free(ptr %a)
ret void
}
define void @malloc_then_free_needed() {
; CHECK-LABEL: @malloc_then_free_needed(
-; CHECK-NEXT: [[A:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
-; CHECK-NEXT: call void @user(i8* [[A]])
-; CHECK-NEXT: call void @free(i8* [[A]])
+; CHECK-NEXT: [[A:%.*]] = call dereferenceable_or_null(4) ptr @malloc(i64 4)
+; CHECK-NEXT: call void @user(ptr [[A]])
+; CHECK-NEXT: call void @free(ptr [[A]])
; CHECK-NEXT: ret void
;
- %a = call i8* @malloc(i64 4)
- call void @user(i8* %a)
- call void @free(i8* %a)
+ %a = call ptr @malloc(i64 4)
+ call void @user(ptr %a)
+ call void @free(ptr %a)
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
-declare <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptrs, i32, <2 x i1> %mask, <2 x double> %src0)
-declare void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptrs, i32, <2 x i1> %mask)
-declare <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> %ptrs, i32, <2 x i1> %mask, <2 x double> %passthru)
-declare <4 x double> @llvm.masked.gather.v4f64.v4p0f64(<4 x double*> %ptrs, i32, <4 x i1> %mask, <4 x double> %passthru)
-declare void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> %val, <2 x double*> %ptrs, i32, <2 x i1> %mask)
+declare <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptrs, i32, <2 x i1> %mask, <2 x double> %src0)
+declare void @llvm.masked.store.v2f64.p0(<2 x double> %val, ptr %ptrs, i32, <2 x i1> %mask)
+declare <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> %ptrs, i32, <2 x i1> %mask, <2 x double> %passthru)
+declare <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> %ptrs, i32, <4 x i1> %mask, <4 x double> %passthru)
+declare void @llvm.masked.scatter.v2f64.v2p0(<2 x double> %val, <2 x ptr> %ptrs, i32, <2 x i1> %mask)
-define <2 x double> @load_zeromask(<2 x double>* %ptr, <2 x double> %passthru) {
+define <2 x double> @load_zeromask(ptr %ptr, <2 x double> %passthru) {
; CHECK-LABEL: @load_zeromask(
; CHECK-NEXT: ret <2 x double> [[PASSTHRU:%.*]]
;
- %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 1, <2 x i1> zeroinitializer, <2 x double> %passthru)
+ %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 1, <2 x i1> zeroinitializer, <2 x double> %passthru)
ret <2 x double> %res
}
-define <2 x double> @load_onemask(<2 x double>* %ptr, <2 x double> %passthru) {
+define <2 x double> @load_onemask(ptr %ptr, <2 x double> %passthru) {
; CHECK-LABEL: @load_onemask(
-; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <2 x double>, <2 x double>* [[PTR:%.*]], align 2
+; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <2 x double>, ptr [[PTR:%.*]], align 2
; CHECK-NEXT: ret <2 x double> [[UNMASKEDLOAD]]
;
- %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 1, i1 1>, <2 x double> %passthru)
+ %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 2, <2 x i1> <i1 1, i1 1>, <2 x double> %passthru)
ret <2 x double> %res
}
-define <2 x double> @load_undefmask(<2 x double>* %ptr, <2 x double> %passthru) {
+define <2 x double> @load_undefmask(ptr %ptr, <2 x double> %passthru) {
; CHECK-LABEL: @load_undefmask(
-; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <2 x double>, <2 x double>* [[PTR:%.*]], align 2
+; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <2 x double>, ptr [[PTR:%.*]], align 2
; CHECK-NEXT: ret <2 x double> [[UNMASKEDLOAD]]
;
- %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 1, i1 undef>, <2 x double> %passthru)
+ %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 2, <2 x i1> <i1 1, i1 undef>, <2 x double> %passthru)
ret <2 x double> %res
}
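; An undef mask lane may be refined to true, so <i1 1, i1 undef> is treated
; as an all-ones mask and the masked load again becomes a plain load.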
@G = external global i8
-define <2 x double> @load_cemask(<2 x double>* %ptr, <2 x double> %passthru) {
+define <2 x double> @load_cemask(ptr %ptr, <2 x double> %passthru) {
; CHECK-LABEL: @load_cemask(
-; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* [[PTR:%.*]], i32 2, <2 x i1> <i1 true, i1 ptrtoint (i8* @G to i1)>, <2 x double> [[PASSTHRU:%.*]])
+; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0(ptr [[PTR:%.*]], i32 2, <2 x i1> <i1 true, i1 ptrtoint (ptr @G to i1)>, <2 x double> [[PASSTHRU:%.*]])
; CHECK-NEXT: ret <2 x double> [[RES]]
;
- %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 1, i1 ptrtoint (i8* @G to i1)>, <2 x double> %passthru)
+ %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 2, <2 x i1> <i1 1, i1 ptrtoint (ptr @G to i1)>, <2 x double> %passthru)
ret <2 x double> %res
}
-define <2 x double> @load_lane0(<2 x double>* %ptr, double %pt) {
+define <2 x double> @load_lane0(ptr %ptr, double %pt) {
; CHECK-LABEL: @load_lane0(
; CHECK-NEXT: [[PTV2:%.*]] = insertelement <2 x double> poison, double [[PT:%.*]], i64 1
-; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* [[PTR:%.*]], i32 2, <2 x i1> <i1 true, i1 false>, <2 x double> [[PTV2]])
+; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0(ptr [[PTR:%.*]], i32 2, <2 x i1> <i1 true, i1 false>, <2 x double> [[PTV2]])
; CHECK-NEXT: ret <2 x double> [[RES]]
;
%ptv1 = insertelement <2 x double> poison, double %pt, i64 0
%ptv2 = insertelement <2 x double> %ptv1, double %pt, i64 1
- %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 true, i1 false>, <2 x double> %ptv2)
+ %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 2, <2 x i1> <i1 true, i1 false>, <2 x double> %ptv2)
ret <2 x double> %res
}
-define double @load_all(double* %base, double %pt) {
+define double @load_all(ptr %base, double %pt) {
; CHECK-LABEL: @load_all(
-; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, double* [[BASE:%.*]], <4 x i64> <i64 0, i64 poison, i64 2, i64 3>
-; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.masked.gather.v4f64.v4p0f64(<4 x double*> [[PTRS]], i32 4, <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x double> undef)
+; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[BASE:%.*]], <4 x i64> <i64 0, i64 poison, i64 2, i64 3>
+; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> [[PTRS]], i32 4, <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x double> undef)
; CHECK-NEXT: [[ELT:%.*]] = extractelement <4 x double> [[RES]], i64 2
; CHECK-NEXT: ret double [[ELT]]
;
- %ptrs = getelementptr double, double* %base, <4 x i64> <i64 0, i64 1, i64 2, i64 3>
- %res = call <4 x double> @llvm.masked.gather.v4f64.v4p0f64(<4 x double*> %ptrs, i32 4, <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x double> undef)
+ %ptrs = getelementptr double, ptr %base, <4 x i64> <i64 0, i64 1, i64 2, i64 3>
+ %res = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x double> undef)
%elt = extractelement <4 x double> %res, i64 2
ret double %elt
}
-define <2 x double> @load_generic(<2 x double>* %ptr, double %pt, <2 x i1> %mask) {
+define <2 x double> @load_generic(ptr %ptr, double %pt, <2 x i1> %mask) {
; CHECK-LABEL: @load_generic(
; CHECK-NEXT: [[PTV1:%.*]] = insertelement <2 x double> poison, double [[PT:%.*]], i64 0
; CHECK-NEXT: [[PTV2:%.*]] = shufflevector <2 x double> [[PTV1]], <2 x double> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* [[PTR:%.*]], i32 4, <2 x i1> [[MASK:%.*]], <2 x double> [[PTV2]])
+; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0(ptr [[PTR:%.*]], i32 4, <2 x i1> [[MASK:%.*]], <2 x double> [[PTV2]])
; CHECK-NEXT: ret <2 x double> [[RES]]
;
%ptv1 = insertelement <2 x double> poison, double %pt, i64 0
%ptv2 = insertelement <2 x double> %ptv1, double %pt, i64 1
- %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 4, <2 x i1> %mask, <2 x double> %ptv2)
+ %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 4, <2 x i1> %mask, <2 x double> %ptv2)
ret <2 x double> %res
}
-define <2 x double> @load_speculative(<2 x double>* dereferenceable(16) align 4 %ptr, double %pt, <2 x i1> %mask) nofree nosync {
+define <2 x double> @load_speculative(ptr dereferenceable(16) align 4 %ptr, double %pt, <2 x i1> %mask) nofree nosync {
; CHECK-LABEL: @load_speculative(
; CHECK-NEXT: [[PTV1:%.*]] = insertelement <2 x double> poison, double [[PT:%.*]], i64 0
; CHECK-NEXT: [[PTV2:%.*]] = shufflevector <2 x double> [[PTV1]], <2 x double> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <2 x double>, <2 x double>* [[PTR:%.*]], align 4
+; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <2 x double>, ptr [[PTR:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = select <2 x i1> [[MASK:%.*]], <2 x double> [[UNMASKEDLOAD]], <2 x double> [[PTV2]]
; CHECK-NEXT: ret <2 x double> [[TMP1]]
;
%ptv1 = insertelement <2 x double> poison, double %pt, i64 0
%ptv2 = insertelement <2 x double> %ptv1, double %pt, i64 1
- %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 4, <2 x i1> %mask, <2 x double> %ptv2)
+ %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 4, <2 x i1> %mask, <2 x double> %ptv2)
ret <2 x double> %res
}
-define <2 x double> @load_speculative_less_aligned(<2 x double>* dereferenceable(16) %ptr, double %pt, <2 x i1> %mask) nofree nosync {
+define <2 x double> @load_speculative_less_aligned(ptr dereferenceable(16) %ptr, double %pt, <2 x i1> %mask) nofree nosync {
; CHECK-LABEL: @load_speculative_less_aligned(
; CHECK-NEXT: [[PTV1:%.*]] = insertelement <2 x double> poison, double [[PT:%.*]], i64 0
; CHECK-NEXT: [[PTV2:%.*]] = shufflevector <2 x double> [[PTV1]], <2 x double> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <2 x double>, <2 x double>* [[PTR:%.*]], align 4
+; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <2 x double>, ptr [[PTR:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = select <2 x i1> [[MASK:%.*]], <2 x double> [[UNMASKEDLOAD]], <2 x double> [[PTV2]]
; CHECK-NEXT: ret <2 x double> [[TMP1]]
;
%ptv1 = insertelement <2 x double> poison, double %pt, i64 0
%ptv2 = insertelement <2 x double> %ptv1, double %pt, i64 1
- %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 4, <2 x i1> %mask, <2 x double> %ptv2)
+ %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 4, <2 x i1> %mask, <2 x double> %ptv2)
ret <2 x double> %res
}
; Can't speculate since only half of the required size is known dereferenceable
-define <2 x double> @load_spec_neg_size(<2 x double>* dereferenceable(8) %ptr, double %pt, <2 x i1> %mask) nofree nosync {
+define <2 x double> @load_spec_neg_size(ptr dereferenceable(8) %ptr, double %pt, <2 x i1> %mask) nofree nosync {
; CHECK-LABEL: @load_spec_neg_size(
; CHECK-NEXT: [[PTV1:%.*]] = insertelement <2 x double> poison, double [[PT:%.*]], i64 0
; CHECK-NEXT: [[PTV2:%.*]] = shufflevector <2 x double> [[PTV1]], <2 x double> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* nonnull [[PTR:%.*]], i32 4, <2 x i1> [[MASK:%.*]], <2 x double> [[PTV2]])
+; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0(ptr nonnull [[PTR:%.*]], i32 4, <2 x i1> [[MASK:%.*]], <2 x double> [[PTV2]])
; CHECK-NEXT: ret <2 x double> [[RES]]
;
%ptv1 = insertelement <2 x double> poison, double %pt, i64 0
%ptv2 = insertelement <2 x double> %ptv1, double %pt, i64 1
- %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 4, <2 x i1> %mask, <2 x double> %ptv2)
+ %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 4, <2 x i1> %mask, <2 x double> %ptv2)
ret <2 x double> %res
}
; Can only speculate one lane (but it's the only one active)
-define <2 x double> @load_spec_lan0(<2 x double>* dereferenceable(8) %ptr, double %pt, <2 x i1> %mask) nofree nosync {
+define <2 x double> @load_spec_lan0(ptr dereferenceable(8) %ptr, double %pt, <2 x i1> %mask) nofree nosync {
; CHECK-LABEL: @load_spec_lan0(
; CHECK-NEXT: [[PTV1:%.*]] = insertelement <2 x double> poison, double [[PT:%.*]], i64 0
; CHECK-NEXT: [[PTV2:%.*]] = shufflevector <2 x double> [[PTV1]], <2 x double> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[MASK2:%.*]] = insertelement <2 x i1> [[MASK:%.*]], i1 false, i64 1
-; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* nonnull [[PTR:%.*]], i32 4, <2 x i1> [[MASK2]], <2 x double> [[PTV2]])
+; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0(ptr nonnull [[PTR:%.*]], i32 4, <2 x i1> [[MASK2]], <2 x double> [[PTV2]])
; CHECK-NEXT: ret <2 x double> [[RES]]
;
%ptv1 = insertelement <2 x double> poison, double %pt, i64 0
%ptv2 = insertelement <2 x double> %ptv1, double %pt, i64 1
%mask2 = insertelement <2 x i1> %mask, i1 false, i64 1
- %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 4, <2 x i1> %mask2, <2 x double> %ptv2)
+ %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 4, <2 x i1> %mask2, <2 x double> %ptv2)
ret <2 x double> %res
}
-define void @store_zeromask(<2 x double>* %ptr, <2 x double> %val) {
+define void @store_zeromask(ptr %ptr, <2 x double> %val) {
; CHECK-LABEL: @store_zeromask(
; CHECK-NEXT: ret void
;
- call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptr, i32 4, <2 x i1> zeroinitializer)
+ call void @llvm.masked.store.v2f64.p0(<2 x double> %val, ptr %ptr, i32 4, <2 x i1> zeroinitializer)
ret void
}
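; An all-false mask writes no lanes, so the masked store is a no-op and is
; deleted outright; with an all-ones mask (next test) it is a plain store.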
-define void @store_onemask(<2 x double>* %ptr, <2 x double> %val) {
+define void @store_onemask(ptr %ptr, <2 x double> %val) {
; CHECK-LABEL: @store_onemask(
-; CHECK-NEXT: store <2 x double> [[VAL:%.*]], <2 x double>* [[PTR:%.*]], align 4
+; CHECK-NEXT: store <2 x double> [[VAL:%.*]], ptr [[PTR:%.*]], align 4
; CHECK-NEXT: ret void
;
- call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptr, i32 4, <2 x i1> <i1 1, i1 1>)
+ call void @llvm.masked.store.v2f64.p0(<2 x double> %val, ptr %ptr, i32 4, <2 x i1> <i1 1, i1 1>)
ret void
}
-define void @store_demandedelts(<2 x double>* %ptr, double %val) {
+define void @store_demandedelts(ptr %ptr, double %val) {
; CHECK-LABEL: @store_demandedelts(
; CHECK-NEXT: [[VALVEC1:%.*]] = insertelement <2 x double> poison, double [[VAL:%.*]], i64 0
-; CHECK-NEXT: call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> [[VALVEC1]], <2 x double>* [[PTR:%.*]], i32 4, <2 x i1> <i1 true, i1 false>)
+; CHECK-NEXT: call void @llvm.masked.store.v2f64.p0(<2 x double> [[VALVEC1]], ptr [[PTR:%.*]], i32 4, <2 x i1> <i1 true, i1 false>)
; CHECK-NEXT: ret void
;
%valvec1 = insertelement <2 x double> poison, double %val, i32 0
%valvec2 = insertelement <2 x double> %valvec1, double %val, i32 1
- call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %valvec2, <2 x double>* %ptr, i32 4, <2 x i1> <i1 true, i1 false>)
+ call void @llvm.masked.store.v2f64.p0(<2 x double> %valvec2, ptr %ptr, i32 4, <2 x i1> <i1 true, i1 false>)
ret void
}
-define <2 x double> @gather_generic(<2 x double*> %ptrs, <2 x i1> %mask, <2 x double> %passthru) {
+define <2 x double> @gather_generic(<2 x ptr> %ptrs, <2 x i1> %mask, <2 x double> %passthru) {
; CHECK-LABEL: @gather_generic(
-; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> [[PTRS:%.*]], i32 4, <2 x i1> [[MASK:%.*]], <2 x double> [[PASSTHRU:%.*]])
+; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> [[PTRS:%.*]], i32 4, <2 x i1> [[MASK:%.*]], <2 x double> [[PASSTHRU:%.*]])
; CHECK-NEXT: ret <2 x double> [[RES]]
;
- %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> %ptrs, i32 4, <2 x i1> %mask, <2 x double> %passthru)
+ %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> %ptrs, i32 4, <2 x i1> %mask, <2 x double> %passthru)
ret <2 x double> %res
}
-define <2 x double> @gather_zeromask(<2 x double*> %ptrs, <2 x double> %passthru) {
+define <2 x double> @gather_zeromask(<2 x ptr> %ptrs, <2 x double> %passthru) {
; CHECK-LABEL: @gather_zeromask(
; CHECK-NEXT: ret <2 x double> [[PASSTHRU:%.*]]
;
- %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> %ptrs, i32 4, <2 x i1> zeroinitializer, <2 x double> %passthru)
+ %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> %ptrs, i32 4, <2 x i1> zeroinitializer, <2 x double> %passthru)
ret <2 x double> %res
}
-define <2 x double> @gather_onemask(<2 x double*> %ptrs, <2 x double> %passthru) {
+define <2 x double> @gather_onemask(<2 x ptr> %ptrs, <2 x double> %passthru) {
; CHECK-LABEL: @gather_onemask(
-; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> [[PTRS:%.*]], i32 4, <2 x i1> <i1 true, i1 true>, <2 x double> poison)
+; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> [[PTRS:%.*]], i32 4, <2 x i1> <i1 true, i1 true>, <2 x double> poison)
; CHECK-NEXT: ret <2 x double> [[RES]]
;
- %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> %ptrs, i32 4, <2 x i1> <i1 true, i1 true>, <2 x double> %passthru)
+ %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> %ptrs, i32 4, <2 x i1> <i1 true, i1 true>, <2 x double> %passthru)
ret <2 x double> %res
}
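; Note (not from the original test file): with an all-true mask no lane can
; read the passthru, so it is relaxed to poison, but the gather itself must
; remain because its lanes may address arbitrary, non-contiguous locations.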
-define <4 x double> @gather_lane2(double* %base, double %pt) {
+define <4 x double> @gather_lane2(ptr %base, double %pt) {
; CHECK-LABEL: @gather_lane2(
-; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, double* [[BASE:%.*]], <4 x i64> <i64 poison, i64 poison, i64 2, i64 poison>
+; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[BASE:%.*]], <4 x i64> <i64 poison, i64 poison, i64 2, i64 poison>
; CHECK-NEXT: [[PT_V1:%.*]] = insertelement <4 x double> poison, double [[PT:%.*]], i64 0
; CHECK-NEXT: [[PT_V2:%.*]] = shufflevector <4 x double> [[PT_V1]], <4 x double> poison, <4 x i32> <i32 0, i32 0, i32 undef, i32 0>
-; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.masked.gather.v4f64.v4p0f64(<4 x double*> [[PTRS]], i32 4, <4 x i1> <i1 false, i1 false, i1 true, i1 false>, <4 x double> [[PT_V2]])
+; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> [[PTRS]], i32 4, <4 x i1> <i1 false, i1 false, i1 true, i1 false>, <4 x double> [[PT_V2]])
; CHECK-NEXT: ret <4 x double> [[RES]]
;
- %ptrs = getelementptr double, double *%base, <4 x i64> <i64 0, i64 1, i64 2, i64 3>
+ %ptrs = getelementptr double, ptr %base, <4 x i64> <i64 0, i64 1, i64 2, i64 3>
%pt_v1 = insertelement <4 x double> poison, double %pt, i64 0
%pt_v2 = shufflevector <4 x double> %pt_v1, <4 x double> poison, <4 x i32> zeroinitializer
- %res = call <4 x double> @llvm.masked.gather.v4f64.v4p0f64(<4 x double*> %ptrs, i32 4, <4 x i1> <i1 false, i1 false, i1 true, i1 false>, <4 x double> %pt_v2)
+ %res = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 false, i1 false, i1 true, i1 false>, <4 x double> %pt_v2)
ret <4 x double> %res
}
-define <2 x double> @gather_lane0_maybe(double* %base, double %pt, <2 x i1> %mask) {
+define <2 x double> @gather_lane0_maybe(ptr %base, double %pt, <2 x i1> %mask) {
; CHECK-LABEL: @gather_lane0_maybe(
-; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, double* [[BASE:%.*]], <2 x i64> <i64 0, i64 1>
+; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[BASE:%.*]], <2 x i64> <i64 0, i64 1>
; CHECK-NEXT: [[PT_V1:%.*]] = insertelement <2 x double> poison, double [[PT:%.*]], i64 0
; CHECK-NEXT: [[PT_V2:%.*]] = shufflevector <2 x double> [[PT_V1]], <2 x double> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[MASK2:%.*]] = insertelement <2 x i1> [[MASK:%.*]], i1 false, i64 1
-; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> [[PTRS]], i32 4, <2 x i1> [[MASK2]], <2 x double> [[PT_V2]])
+; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> [[PTRS]], i32 4, <2 x i1> [[MASK2]], <2 x double> [[PT_V2]])
; CHECK-NEXT: ret <2 x double> [[RES]]
;
- %ptrs = getelementptr double, double *%base, <2 x i64> <i64 0, i64 1>
+ %ptrs = getelementptr double, ptr %base, <2 x i64> <i64 0, i64 1>
%pt_v1 = insertelement <2 x double> poison, double %pt, i64 0
%pt_v2 = insertelement <2 x double> %pt_v1, double %pt, i64 1
%mask2 = insertelement <2 x i1> %mask, i1 false, i64 1
- %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> %ptrs, i32 4, <2 x i1> %mask2, <2 x double> %pt_v2)
+ %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> %ptrs, i32 4, <2 x i1> %mask2, <2 x double> %pt_v2)
ret <2 x double> %res
}
-define <2 x double> @gather_lane0_maybe_spec(double* %base, double %pt, <2 x i1> %mask) {
+define <2 x double> @gather_lane0_maybe_spec(ptr %base, double %pt, <2 x i1> %mask) {
; CHECK-LABEL: @gather_lane0_maybe_spec(
-; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, double* [[BASE:%.*]], <2 x i64> <i64 0, i64 1>
+; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[BASE:%.*]], <2 x i64> <i64 0, i64 1>
; CHECK-NEXT: [[PT_V1:%.*]] = insertelement <2 x double> poison, double [[PT:%.*]], i64 0
; CHECK-NEXT: [[PT_V2:%.*]] = shufflevector <2 x double> [[PT_V1]], <2 x double> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[MASK2:%.*]] = insertelement <2 x i1> [[MASK:%.*]], i1 false, i64 1
-; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> [[PTRS]], i32 4, <2 x i1> [[MASK2]], <2 x double> [[PT_V2]])
+; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> [[PTRS]], i32 4, <2 x i1> [[MASK2]], <2 x double> [[PT_V2]])
; CHECK-NEXT: ret <2 x double> [[RES]]
;
- %ptrs = getelementptr double, double *%base, <2 x i64> <i64 0, i64 1>
+ %ptrs = getelementptr double, ptr %base, <2 x i64> <i64 0, i64 1>
%pt_v1 = insertelement <2 x double> poison, double %pt, i64 0
%pt_v2 = insertelement <2 x double> %pt_v1, double %pt, i64 1
%mask2 = insertelement <2 x i1> %mask, i1 false, i64 1
- %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> %ptrs, i32 4, <2 x i1> %mask2, <2 x double> %pt_v2)
+ %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> %ptrs, i32 4, <2 x i1> %mask2, <2 x double> %pt_v2)
ret <2 x double> %res
}
-define void @scatter_zeromask(<2 x double*> %ptrs, <2 x double> %val) {
+define void @scatter_zeromask(<2 x ptr> %ptrs, <2 x double> %val) {
; CHECK-LABEL: @scatter_zeromask(
; CHECK-NEXT: ret void
;
- call void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> %val, <2 x double*> %ptrs, i32 8, <2 x i1> zeroinitializer)
+ call void @llvm.masked.scatter.v2f64.v2p0(<2 x double> %val, <2 x ptr> %ptrs, i32 8, <2 x i1> zeroinitializer)
ret void
}
-define void @scatter_demandedelts(double* %ptr, double %val) {
+define void @scatter_demandedelts(ptr %ptr, double %val) {
; CHECK-LABEL: @scatter_demandedelts(
-; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, double* [[PTR:%.*]], <2 x i64> <i64 0, i64 poison>
+; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[PTR:%.*]], <2 x i64> <i64 0, i64 poison>
; CHECK-NEXT: [[VALVEC1:%.*]] = insertelement <2 x double> poison, double [[VAL:%.*]], i64 0
-; CHECK-NEXT: call void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> [[VALVEC1]], <2 x double*> [[PTRS]], i32 8, <2 x i1> <i1 true, i1 false>)
+; CHECK-NEXT: call void @llvm.masked.scatter.v2f64.v2p0(<2 x double> [[VALVEC1]], <2 x ptr> [[PTRS]], i32 8, <2 x i1> <i1 true, i1 false>)
; CHECK-NEXT: ret void
;
- %ptrs = getelementptr double, double* %ptr, <2 x i64> <i64 0, i64 1>
+ %ptrs = getelementptr double, ptr %ptr, <2 x i64> <i64 0, i64 1>
%valvec1 = insertelement <2 x double> poison, double %val, i32 0
%valvec2 = insertelement <2 x double> %valvec1, double %val, i32 1
- call void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> %valvec2, <2 x double*> %ptrs, i32 8, <2 x i1> <i1 true, i1 false>)
+ call void @llvm.masked.scatter.v2f64.v2p0(<2 x double> %valvec2, <2 x ptr> %ptrs, i32 8, <2 x i1> <i1 true, i1 false>)
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
-declare <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptrs, i32, <2 x i1> %mask, <2 x double> %src0)
-declare void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptrs, i32, <2 x i1> %mask)
-declare <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> %ptrs, i32, <2 x i1> %mask, <2 x double> %passthru)
-declare <4 x double> @llvm.masked.gather.v4f64.v4p0f64(<4 x double*> %ptrs, i32, <4 x i1> %mask, <4 x double> %passthru)
-declare void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> %val, <2 x double*> %ptrs, i32, <2 x i1> %mask)
+declare <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptrs, i32, <2 x i1> %mask, <2 x double> %src0)
+declare void @llvm.masked.store.v2f64.p0(<2 x double> %val, ptr %ptrs, i32, <2 x i1> %mask)
+declare <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> %ptrs, i32, <2 x i1> %mask, <2 x double> %passthru)
+declare <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> %ptrs, i32, <4 x i1> %mask, <4 x double> %passthru)
+declare void @llvm.masked.scatter.v2f64.v2p0(<2 x double> %val, <2 x ptr> %ptrs, i32, <2 x i1> %mask)
-define <2 x double> @load_zeromask(<2 x double>* %ptr, <2 x double> %passthru) {
+define <2 x double> @load_zeromask(ptr %ptr, <2 x double> %passthru) {
; CHECK-LABEL: @load_zeromask(
; CHECK-NEXT: ret <2 x double> [[PASSTHRU:%.*]]
;
- %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 1, <2 x i1> zeroinitializer, <2 x double> %passthru)
+ %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 1, <2 x i1> zeroinitializer, <2 x double> %passthru)
ret <2 x double> %res
}
-define <2 x double> @load_onemask(<2 x double>* %ptr, <2 x double> %passthru) {
+define <2 x double> @load_onemask(ptr %ptr, <2 x double> %passthru) {
; CHECK-LABEL: @load_onemask(
-; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <2 x double>, <2 x double>* [[PTR:%.*]], align 2
+; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <2 x double>, ptr [[PTR:%.*]], align 2
; CHECK-NEXT: ret <2 x double> [[UNMASKEDLOAD]]
;
- %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 1, i1 1>, <2 x double> %passthru)
+ %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 2, <2 x i1> <i1 1, i1 1>, <2 x double> %passthru)
ret <2 x double> %res
}
-define <2 x double> @load_undefmask(<2 x double>* %ptr, <2 x double> %passthru) {
+define <2 x double> @load_undefmask(ptr %ptr, <2 x double> %passthru) {
; CHECK-LABEL: @load_undefmask(
-; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <2 x double>, <2 x double>* [[PTR:%.*]], align 2
+; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <2 x double>, ptr [[PTR:%.*]], align 2
; CHECK-NEXT: ret <2 x double> [[UNMASKEDLOAD]]
;
- %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 1, i1 undef>, <2 x double> %passthru)
+ %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 2, <2 x i1> <i1 1, i1 undef>, <2 x double> %passthru)
ret <2 x double> %res
}
@G = external global i8
-define <2 x double> @load_cemask(<2 x double>* %ptr, <2 x double> %passthru) {
+define <2 x double> @load_cemask(ptr %ptr, <2 x double> %passthru) {
; CHECK-LABEL: @load_cemask(
-; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* [[PTR:%.*]], i32 2, <2 x i1> <i1 true, i1 ptrtoint (i8* @G to i1)>, <2 x double> [[PASSTHRU:%.*]])
+; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0(ptr [[PTR:%.*]], i32 2, <2 x i1> <i1 true, i1 ptrtoint (ptr @G to i1)>, <2 x double> [[PASSTHRU:%.*]])
; CHECK-NEXT: ret <2 x double> [[RES]]
;
- %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 1, i1 ptrtoint (i8* @G to i1)>, <2 x double> %passthru)
+ %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 2, <2 x i1> <i1 1, i1 ptrtoint (ptr @G to i1)>, <2 x double> %passthru)
ret <2 x double> %res
}
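; Note (not from the original test file): here the second mask bit is a
; constant expression that cannot be folded to true or false, so neither the
; zero-mask nor the all-ones simplification fires and the call is kept as is.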
-define <2 x double> @load_lane0(<2 x double>* %ptr, double %pt) {
+define <2 x double> @load_lane0(ptr %ptr, double %pt) {
; CHECK-LABEL: @load_lane0(
; CHECK-NEXT: [[PTV2:%.*]] = insertelement <2 x double> poison, double [[PT:%.*]], i64 1
-; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* [[PTR:%.*]], i32 2, <2 x i1> <i1 true, i1 false>, <2 x double> [[PTV2]])
+; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0(ptr [[PTR:%.*]], i32 2, <2 x i1> <i1 true, i1 false>, <2 x double> [[PTV2]])
; CHECK-NEXT: ret <2 x double> [[RES]]
;
%ptv1 = insertelement <2 x double> undef, double %pt, i64 0
%ptv2 = insertelement <2 x double> %ptv1, double %pt, i64 1
- %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 true, i1 false>, <2 x double> %ptv2)
+ %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 2, <2 x i1> <i1 true, i1 false>, <2 x double> %ptv2)
ret <2 x double> %res
}
-define double @load_all(double* %base, double %pt) {
+define double @load_all(ptr %base, double %pt) {
; CHECK-LABEL: @load_all(
-; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, double* [[BASE:%.*]], <4 x i64> <i64 0, i64 poison, i64 2, i64 3>
-; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.masked.gather.v4f64.v4p0f64(<4 x double*> [[PTRS]], i32 4, <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x double> undef)
+; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[BASE:%.*]], <4 x i64> <i64 0, i64 poison, i64 2, i64 3>
+; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> [[PTRS]], i32 4, <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x double> undef)
; CHECK-NEXT: [[ELT:%.*]] = extractelement <4 x double> [[RES]], i64 2
; CHECK-NEXT: ret double [[ELT]]
;
- %ptrs = getelementptr double, double* %base, <4 x i64> <i64 0, i64 1, i64 2, i64 3>
- %res = call <4 x double> @llvm.masked.gather.v4f64.v4p0f64(<4 x double*> %ptrs, i32 4, <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x double> undef)
+ %ptrs = getelementptr double, ptr %base, <4 x i64> <i64 0, i64 1, i64 2, i64 3>
+ %res = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x double> undef)
%elt = extractelement <4 x double> %res, i64 2
ret double %elt
}
-define <2 x double> @load_generic(<2 x double>* %ptr, double %pt, <2 x i1> %mask) {
+define <2 x double> @load_generic(ptr %ptr, double %pt, <2 x i1> %mask) {
; CHECK-LABEL: @load_generic(
; CHECK-NEXT: [[PTV1:%.*]] = insertelement <2 x double> undef, double [[PT:%.*]], i64 0
; CHECK-NEXT: [[PTV2:%.*]] = shufflevector <2 x double> [[PTV1]], <2 x double> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* [[PTR:%.*]], i32 4, <2 x i1> [[MASK:%.*]], <2 x double> [[PTV2]])
+; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0(ptr [[PTR:%.*]], i32 4, <2 x i1> [[MASK:%.*]], <2 x double> [[PTV2]])
; CHECK-NEXT: ret <2 x double> [[RES]]
;
%ptv1 = insertelement <2 x double> undef, double %pt, i64 0
%ptv2 = insertelement <2 x double> %ptv1, double %pt, i64 1
- %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 4, <2 x i1> %mask, <2 x double> %ptv2)
+ %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 4, <2 x i1> %mask, <2 x double> %ptv2)
ret <2 x double> %res
}
-define <2 x double> @load_speculative(<2 x double>* dereferenceable(16) align 4 %ptr, double %pt, <2 x i1> %mask) nofree nosync {
+define <2 x double> @load_speculative(ptr dereferenceable(16) align 4 %ptr, double %pt, <2 x i1> %mask) nofree nosync {
; CHECK-LABEL: @load_speculative(
; CHECK-NEXT: [[PTV1:%.*]] = insertelement <2 x double> undef, double [[PT:%.*]], i64 0
; CHECK-NEXT: [[PTV2:%.*]] = shufflevector <2 x double> [[PTV1]], <2 x double> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <2 x double>, <2 x double>* [[PTR:%.*]], align 4
+; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <2 x double>, ptr [[PTR:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = select <2 x i1> [[MASK:%.*]], <2 x double> [[UNMASKEDLOAD]], <2 x double> [[PTV2]]
; CHECK-NEXT: ret <2 x double> [[TMP1]]
;
%ptv1 = insertelement <2 x double> undef, double %pt, i64 0
%ptv2 = insertelement <2 x double> %ptv1, double %pt, i64 1
- %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 4, <2 x i1> %mask, <2 x double> %ptv2)
+ %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 4, <2 x i1> %mask, <2 x double> %ptv2)
ret <2 x double> %res
}
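; A minimal sketch of the speculation rewrite above (added for exposition,
; not from the original file): when the pointer is dereferenceable for the
; whole vector and the function is nofree/nosync, the masked load can be
; executed unconditionally and the mask moved into a per-lane select:
;   %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %p, i32 4, <2 x i1> %m, <2 x double> %pt)
; becomes
;   %w = load <2 x double>, ptr %p, align 4
;   %res = select <2 x i1> %m, <2 x double> %w, <2 x double> %pt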
-define <2 x double> @load_speculative_less_aligned(<2 x double>* dereferenceable(16) %ptr, double %pt, <2 x i1> %mask) nofree nosync {
+define <2 x double> @load_speculative_less_aligned(ptr dereferenceable(16) %ptr, double %pt, <2 x i1> %mask) nofree nosync {
; CHECK-LABEL: @load_speculative_less_aligned(
; CHECK-NEXT: [[PTV1:%.*]] = insertelement <2 x double> undef, double [[PT:%.*]], i64 0
; CHECK-NEXT: [[PTV2:%.*]] = shufflevector <2 x double> [[PTV1]], <2 x double> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <2 x double>, <2 x double>* [[PTR:%.*]], align 4
+; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <2 x double>, ptr [[PTR:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = select <2 x i1> [[MASK:%.*]], <2 x double> [[UNMASKEDLOAD]], <2 x double> [[PTV2]]
; CHECK-NEXT: ret <2 x double> [[TMP1]]
;
%ptv1 = insertelement <2 x double> undef, double %pt, i64 0
%ptv2 = insertelement <2 x double> %ptv1, double %pt, i64 1
- %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 4, <2 x i1> %mask, <2 x double> %ptv2)
+ %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 4, <2 x i1> %mask, <2 x double> %ptv2)
ret <2 x double> %res
}
; Can't speculate since only half of required size is known deref
-define <2 x double> @load_spec_neg_size(<2 x double>* dereferenceable(8) %ptr, double %pt, <2 x i1> %mask) nofree nosync {
+define <2 x double> @load_spec_neg_size(ptr dereferenceable(8) %ptr, double %pt, <2 x i1> %mask) nofree nosync {
; CHECK-LABEL: @load_spec_neg_size(
; CHECK-NEXT: [[PTV1:%.*]] = insertelement <2 x double> undef, double [[PT:%.*]], i64 0
; CHECK-NEXT: [[PTV2:%.*]] = shufflevector <2 x double> [[PTV1]], <2 x double> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* nonnull [[PTR:%.*]], i32 4, <2 x i1> [[MASK:%.*]], <2 x double> [[PTV2]])
+; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0(ptr nonnull [[PTR:%.*]], i32 4, <2 x i1> [[MASK:%.*]], <2 x double> [[PTV2]])
; CHECK-NEXT: ret <2 x double> [[RES]]
;
%ptv1 = insertelement <2 x double> undef, double %pt, i64 0
%ptv2 = insertelement <2 x double> %ptv1, double %pt, i64 1
- %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 4, <2 x i1> %mask, <2 x double> %ptv2)
+ %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 4, <2 x i1> %mask, <2 x double> %ptv2)
ret <2 x double> %res
}
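; The arithmetic behind the negative test above: a <2 x double> load needs
; 2 * 8 = 16 dereferenceable bytes, but only dereferenceable(8) is known, so
; speculating the full-width load would be unsound.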
; Can only speculate one lane (but it's the only one active)
-define <2 x double> @load_spec_lan0(<2 x double>* dereferenceable(8) %ptr, double %pt, <2 x i1> %mask) nofree nosync {
+define <2 x double> @load_spec_lan0(ptr dereferenceable(8) %ptr, double %pt, <2 x i1> %mask) nofree nosync {
; CHECK-LABEL: @load_spec_lan0(
; CHECK-NEXT: [[PTV1:%.*]] = insertelement <2 x double> undef, double [[PT:%.*]], i64 0
; CHECK-NEXT: [[PTV2:%.*]] = shufflevector <2 x double> [[PTV1]], <2 x double> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[MASK2:%.*]] = insertelement <2 x i1> [[MASK:%.*]], i1 false, i64 1
-; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* nonnull [[PTR:%.*]], i32 4, <2 x i1> [[MASK2]], <2 x double> [[PTV2]])
+; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0(ptr nonnull [[PTR:%.*]], i32 4, <2 x i1> [[MASK2]], <2 x double> [[PTV2]])
; CHECK-NEXT: ret <2 x double> [[RES]]
;
%ptv1 = insertelement <2 x double> undef, double %pt, i64 0
%ptv2 = insertelement <2 x double> %ptv1, double %pt, i64 1
%mask2 = insertelement <2 x i1> %mask, i1 false, i64 1
- %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 4, <2 x i1> %mask2, <2 x double> %ptv2)
+ %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 4, <2 x i1> %mask2, <2 x double> %ptv2)
ret <2 x double> %res
}
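; Observationally (a note, not from the original file): even though lane 1 is
; forced inactive and the known 8 bytes would cover lane 0, the masked load is
; not speculated here; the call merely gains a nonnull annotation on %ptr.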
-define void @store_zeromask(<2 x double>* %ptr, <2 x double> %val) {
+define void @store_zeromask(ptr %ptr, <2 x double> %val) {
; CHECK-LABEL: @store_zeromask(
; CHECK-NEXT: ret void
;
- call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptr, i32 4, <2 x i1> zeroinitializer)
+ call void @llvm.masked.store.v2f64.p0(<2 x double> %val, ptr %ptr, i32 4, <2 x i1> zeroinitializer)
ret void
}
-define void @store_onemask(<2 x double>* %ptr, <2 x double> %val) {
+define void @store_onemask(ptr %ptr, <2 x double> %val) {
; CHECK-LABEL: @store_onemask(
-; CHECK-NEXT: store <2 x double> [[VAL:%.*]], <2 x double>* [[PTR:%.*]], align 4
+; CHECK-NEXT: store <2 x double> [[VAL:%.*]], ptr [[PTR:%.*]], align 4
; CHECK-NEXT: ret void
;
- call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptr, i32 4, <2 x i1> <i1 1, i1 1>)
+ call void @llvm.masked.store.v2f64.p0(<2 x double> %val, ptr %ptr, i32 4, <2 x i1> <i1 1, i1 1>)
ret void
}
-define void @store_demandedelts(<2 x double>* %ptr, double %val) {
+define void @store_demandedelts(ptr %ptr, double %val) {
; CHECK-LABEL: @store_demandedelts(
; CHECK-NEXT: [[VALVEC1:%.*]] = insertelement <2 x double> undef, double [[VAL:%.*]], i64 0
-; CHECK-NEXT: call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> [[VALVEC1]], <2 x double>* [[PTR:%.*]], i32 4, <2 x i1> <i1 true, i1 false>)
+; CHECK-NEXT: call void @llvm.masked.store.v2f64.p0(<2 x double> [[VALVEC1]], ptr [[PTR:%.*]], i32 4, <2 x i1> <i1 true, i1 false>)
; CHECK-NEXT: ret void
;
%valvec1 = insertelement <2 x double> undef, double %val, i32 0
%valvec2 = insertelement <2 x double> %valvec1, double %val, i32 1
- call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %valvec2, <2 x double>* %ptr, i32 4, <2 x i1> <i1 true, i1 false>)
+ call void @llvm.masked.store.v2f64.p0(<2 x double> %valvec2, ptr %ptr, i32 4, <2 x i1> <i1 true, i1 false>)
ret void
}
-define <2 x double> @gather_generic(<2 x double*> %ptrs, <2 x i1> %mask, <2 x double> %passthru) {
+define <2 x double> @gather_generic(<2 x ptr> %ptrs, <2 x i1> %mask, <2 x double> %passthru) {
; CHECK-LABEL: @gather_generic(
-; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> [[PTRS:%.*]], i32 4, <2 x i1> [[MASK:%.*]], <2 x double> [[PASSTHRU:%.*]])
+; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> [[PTRS:%.*]], i32 4, <2 x i1> [[MASK:%.*]], <2 x double> [[PASSTHRU:%.*]])
; CHECK-NEXT: ret <2 x double> [[RES]]
;
- %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> %ptrs, i32 4, <2 x i1> %mask, <2 x double> %passthru)
+ %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> %ptrs, i32 4, <2 x i1> %mask, <2 x double> %passthru)
ret <2 x double> %res
}
-define <2 x double> @gather_zeromask(<2 x double*> %ptrs, <2 x double> %passthru) {
+define <2 x double> @gather_zeromask(<2 x ptr> %ptrs, <2 x double> %passthru) {
; CHECK-LABEL: @gather_zeromask(
; CHECK-NEXT: ret <2 x double> [[PASSTHRU:%.*]]
;
- %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> %ptrs, i32 4, <2 x i1> zeroinitializer, <2 x double> %passthru)
+ %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> %ptrs, i32 4, <2 x i1> zeroinitializer, <2 x double> %passthru)
ret <2 x double> %res
}
-define <2 x double> @gather_onemask(<2 x double*> %ptrs, <2 x double> %passthru) {
+define <2 x double> @gather_onemask(<2 x ptr> %ptrs, <2 x double> %passthru) {
; CHECK-LABEL: @gather_onemask(
-; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> [[PTRS:%.*]], i32 4, <2 x i1> <i1 true, i1 true>, <2 x double> poison)
+; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> [[PTRS:%.*]], i32 4, <2 x i1> <i1 true, i1 true>, <2 x double> poison)
; CHECK-NEXT: ret <2 x double> [[RES]]
;
- %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> %ptrs, i32 4, <2 x i1> <i1 true, i1 true>, <2 x double> %passthru)
+ %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> %ptrs, i32 4, <2 x i1> <i1 true, i1 true>, <2 x double> %passthru)
ret <2 x double> %res
}
-define <4 x double> @gather_lane2(double* %base, double %pt) {
+define <4 x double> @gather_lane2(ptr %base, double %pt) {
; CHECK-LABEL: @gather_lane2(
-; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, double* [[BASE:%.*]], <4 x i64> <i64 poison, i64 poison, i64 2, i64 poison>
+; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[BASE:%.*]], <4 x i64> <i64 poison, i64 poison, i64 2, i64 poison>
; CHECK-NEXT: [[PT_V1:%.*]] = insertelement <4 x double> undef, double [[PT:%.*]], i64 0
; CHECK-NEXT: [[PT_V2:%.*]] = shufflevector <4 x double> [[PT_V1]], <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 undef, i32 0>
-; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.masked.gather.v4f64.v4p0f64(<4 x double*> [[PTRS]], i32 4, <4 x i1> <i1 false, i1 false, i1 true, i1 false>, <4 x double> [[PT_V2]])
+; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> [[PTRS]], i32 4, <4 x i1> <i1 false, i1 false, i1 true, i1 false>, <4 x double> [[PT_V2]])
; CHECK-NEXT: ret <4 x double> [[RES]]
;
- %ptrs = getelementptr double, double *%base, <4 x i64> <i64 0, i64 1, i64 2, i64 3>
+ %ptrs = getelementptr double, ptr %base, <4 x i64> <i64 0, i64 1, i64 2, i64 3>
%pt_v1 = insertelement <4 x double> undef, double %pt, i64 0
%pt_v2 = shufflevector <4 x double> %pt_v1, <4 x double> undef, <4 x i32> zeroinitializer
- %res = call <4 x double> @llvm.masked.gather.v4f64.v4p0f64(<4 x double*> %ptrs, i32 4, <4 x i1> <i1 false, i1 false, i1 true, i1 false>, <4 x double> %pt_v2)
+ %res = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 false, i1 false, i1 true, i1 false>, <4 x double> %pt_v2)
ret <4 x double> %res
}
-define <2 x double> @gather_lane0_maybe(double* %base, double %pt, <2 x i1> %mask) {
+define <2 x double> @gather_lane0_maybe(ptr %base, double %pt, <2 x i1> %mask) {
; CHECK-LABEL: @gather_lane0_maybe(
-; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, double* [[BASE:%.*]], <2 x i64> <i64 0, i64 1>
+; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[BASE:%.*]], <2 x i64> <i64 0, i64 1>
; CHECK-NEXT: [[PT_V1:%.*]] = insertelement <2 x double> undef, double [[PT:%.*]], i64 0
; CHECK-NEXT: [[PT_V2:%.*]] = shufflevector <2 x double> [[PT_V1]], <2 x double> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[MASK2:%.*]] = insertelement <2 x i1> [[MASK:%.*]], i1 false, i64 1
-; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> [[PTRS]], i32 4, <2 x i1> [[MASK2]], <2 x double> [[PT_V2]])
+; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> [[PTRS]], i32 4, <2 x i1> [[MASK2]], <2 x double> [[PT_V2]])
; CHECK-NEXT: ret <2 x double> [[RES]]
;
- %ptrs = getelementptr double, double *%base, <2 x i64> <i64 0, i64 1>
+ %ptrs = getelementptr double, ptr %base, <2 x i64> <i64 0, i64 1>
%pt_v1 = insertelement <2 x double> undef, double %pt, i64 0
%pt_v2 = insertelement <2 x double> %pt_v1, double %pt, i64 1
%mask2 = insertelement <2 x i1> %mask, i1 false, i64 1
- %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> %ptrs, i32 4, <2 x i1> %mask2, <2 x double> %pt_v2)
+ %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> %ptrs, i32 4, <2 x i1> %mask2, <2 x double> %pt_v2)
ret <2 x double> %res
}
-define <2 x double> @gather_lane0_maybe_spec(double* %base, double %pt, <2 x i1> %mask) {
+define <2 x double> @gather_lane0_maybe_spec(ptr %base, double %pt, <2 x i1> %mask) {
; CHECK-LABEL: @gather_lane0_maybe_spec(
-; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, double* [[BASE:%.*]], <2 x i64> <i64 0, i64 1>
+; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[BASE:%.*]], <2 x i64> <i64 0, i64 1>
; CHECK-NEXT: [[PT_V1:%.*]] = insertelement <2 x double> undef, double [[PT:%.*]], i64 0
; CHECK-NEXT: [[PT_V2:%.*]] = shufflevector <2 x double> [[PT_V1]], <2 x double> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[MASK2:%.*]] = insertelement <2 x i1> [[MASK:%.*]], i1 false, i64 1
-; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> [[PTRS]], i32 4, <2 x i1> [[MASK2]], <2 x double> [[PT_V2]])
+; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> [[PTRS]], i32 4, <2 x i1> [[MASK2]], <2 x double> [[PT_V2]])
; CHECK-NEXT: ret <2 x double> [[RES]]
;
- %ptrs = getelementptr double, double *%base, <2 x i64> <i64 0, i64 1>
+ %ptrs = getelementptr double, ptr %base, <2 x i64> <i64 0, i64 1>
%pt_v1 = insertelement <2 x double> undef, double %pt, i64 0
%pt_v2 = insertelement <2 x double> %pt_v1, double %pt, i64 1
%mask2 = insertelement <2 x i1> %mask, i1 false, i64 1
- %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> %ptrs, i32 4, <2 x i1> %mask2, <2 x double> %pt_v2)
+ %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> %ptrs, i32 4, <2 x i1> %mask2, <2 x double> %pt_v2)
ret <2 x double> %res
}
-define void @scatter_zeromask(<2 x double*> %ptrs, <2 x double> %val) {
+define void @scatter_zeromask(<2 x ptr> %ptrs, <2 x double> %val) {
; CHECK-LABEL: @scatter_zeromask(
; CHECK-NEXT: ret void
;
- call void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> %val, <2 x double*> %ptrs, i32 8, <2 x i1> zeroinitializer)
+ call void @llvm.masked.scatter.v2f64.v2p0(<2 x double> %val, <2 x ptr> %ptrs, i32 8, <2 x i1> zeroinitializer)
ret void
}
-define void @scatter_demandedelts(double* %ptr, double %val) {
+define void @scatter_demandedelts(ptr %ptr, double %val) {
; CHECK-LABEL: @scatter_demandedelts(
-; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, double* [[PTR:%.*]], <2 x i64> <i64 0, i64 poison>
+; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[PTR:%.*]], <2 x i64> <i64 0, i64 poison>
; CHECK-NEXT: [[VALVEC1:%.*]] = insertelement <2 x double> undef, double [[VAL:%.*]], i64 0
-; CHECK-NEXT: call void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> [[VALVEC1]], <2 x double*> [[PTRS]], i32 8, <2 x i1> <i1 true, i1 false>)
+; CHECK-NEXT: call void @llvm.masked.scatter.v2f64.v2p0(<2 x double> [[VALVEC1]], <2 x ptr> [[PTRS]], i32 8, <2 x i1> <i1 true, i1 false>)
; CHECK-NEXT: ret void
;
- %ptrs = getelementptr double, double* %ptr, <2 x i64> <i64 0, i64 1>
+ %ptrs = getelementptr double, ptr %ptr, <2 x i64> <i64 0, i64 1>
%valvec1 = insertelement <2 x double> undef, double %val, i32 0
%valvec2 = insertelement <2 x double> %valvec1, double %val, i32 1
- call void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> %valvec2, <2 x double*> %ptrs, i32 8, <2 x i1> <i1 true, i1 false>)
+ call void @llvm.masked.scatter.v2f64.v2p0(<2 x double> %valvec2, <2 x ptr> %ptrs, i32 8, <2 x i1> <i1 true, i1 false>)
ret void
}
; Test scatters that can be simplified to scalar stores.
;; Value splat (mask is not used)
-define void @scatter_v4i16_uniform_vals_uniform_ptrs_no_all_active_mask(i16* %dst, i16 %val) {
+define void @scatter_v4i16_uniform_vals_uniform_ptrs_no_all_active_mask(ptr %dst, i16 %val) {
; CHECK-LABEL: @scatter_v4i16_uniform_vals_uniform_ptrs_no_all_active_mask(
; CHECK-NEXT: entry:
-; CHECK-NEXT: store i16 [[VAL:%.*]], i16* [[DST:%.*]], align 2
+; CHECK-NEXT: store i16 [[VAL:%.*]], ptr [[DST:%.*]], align 2
; CHECK-NEXT: ret void
;
entry:
- %broadcast.splatinsert = insertelement <4 x i16*> poison, i16* %dst, i32 0
- %broadcast.splat = shufflevector <4 x i16*> %broadcast.splatinsert, <4 x i16*> poison, <4 x i32> zeroinitializer
+ %broadcast.splatinsert = insertelement <4 x ptr> poison, ptr %dst, i32 0
+ %broadcast.splat = shufflevector <4 x ptr> %broadcast.splatinsert, <4 x ptr> poison, <4 x i32> zeroinitializer
%broadcast.value = insertelement <4 x i16> poison, i16 %val, i32 0
%broadcast.splatvalue = shufflevector <4 x i16> %broadcast.value, <4 x i16> poison, <4 x i32> zeroinitializer
- call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %broadcast.splatvalue, <4 x i16*> %broadcast.splat, i32 2, <4 x i1> <i1 0, i1 0, i1 1, i1 1>)
+ call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %broadcast.splatvalue, <4 x ptr> %broadcast.splat, i32 2, <4 x i1> <i1 0, i1 0, i1 1, i1 1>)
ret void
}
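; A minimal sketch of the scalarization above (added for exposition, not from
; the original file): with a splat value, splat pointers, and a mask that has
; at least one lane known active, every executed lane stores the same scalar
; to the same address, so the whole scatter collapses to
;   store i16 %val, ptr %dst, align 2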
-define void @scatter_nxv4i16_uniform_vals_uniform_ptrs_all_active_mask(i16* %dst, i16 %val) {
+define void @scatter_nxv4i16_uniform_vals_uniform_ptrs_all_active_mask(ptr %dst, i16 %val) {
; CHECK-LABEL: @scatter_nxv4i16_uniform_vals_uniform_ptrs_all_active_mask(
; CHECK-NEXT: entry:
-; CHECK-NEXT: store i16 [[VAL:%.*]], i16* [[DST:%.*]], align 2
+; CHECK-NEXT: store i16 [[VAL:%.*]], ptr [[DST:%.*]], align 2
; CHECK-NEXT: ret void
;
entry:
- %broadcast.splatinsert = insertelement <vscale x 4 x i16*> poison, i16* %dst, i32 0
- %broadcast.splat = shufflevector <vscale x 4 x i16*> %broadcast.splatinsert, <vscale x 4 x i16*> poison, <vscale x 4 x i32> zeroinitializer
+ %broadcast.splatinsert = insertelement <vscale x 4 x ptr> poison, ptr %dst, i32 0
+ %broadcast.splat = shufflevector <vscale x 4 x ptr> %broadcast.splatinsert, <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer
%broadcast.value = insertelement <vscale x 4 x i16> poison, i16 %val, i32 0
%broadcast.splatvalue = shufflevector <vscale x 4 x i16> %broadcast.value, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
- call void @llvm.masked.scatter.nxv4i16.nxv4p0i16(<vscale x 4 x i16> %broadcast.splatvalue, <vscale x 4 x i16*> %broadcast.splat, i32 2, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> zeroinitializer , i1 true, i32 0), <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i32> zeroinitializer))
+ call void @llvm.masked.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16> %broadcast.splatvalue, <vscale x 4 x ptr> %broadcast.splat, i32 2, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> zeroinitializer, i1 true, i32 0), <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i32> zeroinitializer))
ret void
}
;; The pointer is splat and mask is all active, but value is not a splat
-define void @scatter_v4i16_no_uniform_vals_uniform_ptrs_all_active_mask(i16* %dst, <4 x i16>* %src) {
+define void @scatter_v4i16_no_uniform_vals_uniform_ptrs_all_active_mask(ptr %dst, ptr %src) {
; CHECK-LABEL: @scatter_v4i16_no_uniform_vals_uniform_ptrs_all_active_mask(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, <4 x i16>* [[SRC:%.*]], align 2
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[SRC:%.*]], align 2
; CHECK-NEXT: [[TMP0:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i64 3
-; CHECK-NEXT: store i16 [[TMP0]], i16* [[DST:%.*]], align 2
+; CHECK-NEXT: store i16 [[TMP0]], ptr [[DST:%.*]], align 2
; CHECK-NEXT: ret void
;
entry:
- %broadcast.splatinsert = insertelement <4 x i16*> poison, i16* %dst, i32 0
- %broadcast.splat = shufflevector <4 x i16*> %broadcast.splatinsert, <4 x i16*> poison, <4 x i32> zeroinitializer
- %wide.load = load <4 x i16>, <4 x i16>* %src, align 2
- call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %wide.load, <4 x i16*> %broadcast.splat, i32 2, <4 x i1> <i1 1, i1 1, i1 1, i1 1>)
+ %broadcast.splatinsert = insertelement <4 x ptr> poison, ptr %dst, i32 0
+ %broadcast.splat = shufflevector <4 x ptr> %broadcast.splatinsert, <4 x ptr> poison, <4 x i32> zeroinitializer
+ %wide.load = load <4 x i16>, ptr %src, align 2
+ call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %wide.load, <4 x ptr> %broadcast.splat, i32 2, <4 x i1> <i1 1, i1 1, i1 1, i1 1>)
ret void
}
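; Rationale for the lane-3 extract above (a note, not from the original
; file): scatter lanes are committed in ascending lane order, so when all
; pointers are the same and every lane is active, the highest lane wins:
;   %last = extractelement <4 x i16> %wide.load, i64 3
;   store i16 %last, ptr %dst, align 2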
-define void @scatter_nxv4i16_no_uniform_vals_uniform_ptrs_all_active_mask(i16* %dst, <vscale x 4 x i16>* %src) {
+define void @scatter_nxv4i16_no_uniform_vals_uniform_ptrs_all_active_mask(ptr %dst, ptr %src) {
; CHECK-LABEL: @scatter_nxv4i16_no_uniform_vals_uniform_ptrs_all_active_mask(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i16>, <vscale x 4 x i16>* [[SRC:%.*]], align 2
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i16>, ptr [[SRC:%.*]], align 2
; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[TMP0]], 2
; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], -1
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <vscale x 4 x i16> [[WIDE_LOAD]], i32 [[TMP2]]
-; CHECK-NEXT: store i16 [[TMP3]], i16* [[DST:%.*]], align 2
+; CHECK-NEXT: store i16 [[TMP3]], ptr [[DST:%.*]], align 2
; CHECK-NEXT: ret void
;
entry:
- %broadcast.splatinsert = insertelement <vscale x 4 x i16*> poison, i16* %dst, i32 0
- %broadcast.splat = shufflevector <vscale x 4 x i16*> %broadcast.splatinsert, <vscale x 4 x i16*> poison, <vscale x 4 x i32> zeroinitializer
- %wide.load = load <vscale x 4 x i16>, <vscale x 4 x i16>* %src, align 2
- call void @llvm.masked.scatter.nxv4i16.nxv4p0i16(<vscale x 4 x i16> %wide.load, <vscale x 4 x i16*> %broadcast.splat, i32 2, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i32 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
+ %broadcast.splatinsert = insertelement <vscale x 4 x ptr> poison, ptr %dst, i32 0
+ %broadcast.splat = shufflevector <vscale x 4 x ptr> %broadcast.splatinsert, <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer
+ %wide.load = load <vscale x 4 x i16>, ptr %src, align 2
+ call void @llvm.masked.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16> %wide.load, <vscale x 4 x ptr> %broadcast.splat, i32 2, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i32 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
ret void
}
; Negative scatter tests
;; Pointer is splat, but mask is not all active and value is not a splat
-define void @negative_scatter_v4i16_no_uniform_vals_uniform_ptrs_all_inactive_mask(i16* %dst, <4 x i16>* %src) {
+define void @negative_scatter_v4i16_no_uniform_vals_uniform_ptrs_all_inactive_mask(ptr %dst, ptr %src) {
; CHECK-LABEL: @negative_scatter_v4i16_no_uniform_vals_uniform_ptrs_all_inactive_mask(
-; CHECK-NEXT: [[INSERT_ELT:%.*]] = insertelement <4 x i16*> poison, i16* [[DST:%.*]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i16*> [[INSERT_ELT]], <4 x i16*> poison, <4 x i32> <i32 undef, i32 undef, i32 0, i32 0>
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, <4 x i16>* [[SRC:%.*]], align 2
-; CHECK-NEXT: call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> [[WIDE_LOAD]], <4 x i16*> [[BROADCAST_SPLAT]], i32 2, <4 x i1> <i1 false, i1 false, i1 true, i1 true>)
+; CHECK-NEXT: [[INSERT_ELT:%.*]] = insertelement <4 x ptr> poison, ptr [[DST:%.*]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x ptr> [[INSERT_ELT]], <4 x ptr> poison, <4 x i32> <i32 undef, i32 undef, i32 0, i32 0>
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[SRC:%.*]], align 2
+; CHECK-NEXT: call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> [[WIDE_LOAD]], <4 x ptr> [[BROADCAST_SPLAT]], i32 2, <4 x i1> <i1 false, i1 false, i1 true, i1 true>)
; CHECK-NEXT: ret void
;
- %insert.elt = insertelement <4 x i16*> poison, i16* %dst, i32 0
- %broadcast.splat = shufflevector <4 x i16*> %insert.elt, <4 x i16*> poison, <4 x i32> zeroinitializer
- %wide.load = load <4 x i16>, <4 x i16>* %src, align 2
- call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %wide.load, <4 x i16*> %broadcast.splat, i32 2, <4 x i1> <i1 0, i1 0, i1 1, i1 1>)
+ %insert.elt = insertelement <4 x ptr> poison, ptr %dst, i32 0
+ %broadcast.splat = shufflevector <4 x ptr> %insert.elt, <4 x ptr> poison, <4 x i32> zeroinitializer
+ %wide.load = load <4 x i16>, ptr %src, align 2
+ call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %wide.load, <4 x ptr> %broadcast.splat, i32 2, <4 x i1> <i1 0, i1 0, i1 1, i1 1>)
ret void
}
;; The pointer is NOT a splat
-define void @negative_scatter_v4i16_no_uniform_vals_no_uniform_ptrs_all_active_mask(<4 x i16*> %inPtr, <4 x i16>* %src) {
+define void @negative_scatter_v4i16_no_uniform_vals_no_uniform_ptrs_all_active_mask(<4 x ptr> %inPtr, ptr %src) {
; CHECK-LABEL: @negative_scatter_v4i16_no_uniform_vals_no_uniform_ptrs_all_active_mask(
-; CHECK-NEXT: [[BROADCAST:%.*]] = shufflevector <4 x i16*> [[INPTR:%.*]], <4 x i16*> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, <4 x i16>* [[SRC:%.*]], align 2
-; CHECK-NEXT: call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> [[WIDE_LOAD]], <4 x i16*> [[BROADCAST]], i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+; CHECK-NEXT: [[BROADCAST:%.*]] = shufflevector <4 x ptr> [[INPTR:%.*]], <4 x ptr> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[SRC:%.*]], align 2
+; CHECK-NEXT: call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> [[WIDE_LOAD]], <4 x ptr> [[BROADCAST]], i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
; CHECK-NEXT: ret void
;
- %broadcast= shufflevector <4 x i16*> %inPtr, <4 x i16*> poison, <4 x i32> zeroinitializer
- %wide.load = load <4 x i16>, <4 x i16>* %src, align 2
- call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %wide.load, <4 x i16*> %broadcast, i32 2, <4 x i1> <i1 1, i1 1, i1 1, i1 1> )
+ %broadcast = shufflevector <4 x ptr> %inPtr, <4 x ptr> poison, <4 x i32> zeroinitializer
+ %wide.load = load <4 x i16>, ptr %src, align 2
+ call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %wide.load, <4 x ptr> %broadcast, i32 2, <4 x i1> <i1 1, i1 1, i1 1, i1 1>)
ret void
}
; Function Attrs:
-declare void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16>, <4 x i16*>, i32 immarg, <4 x i1>)
-declare void @llvm.masked.scatter.nxv4i16.nxv4p0i16(<vscale x 4 x i16>, <vscale x 4 x i16*>, i32 immarg, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.v4i16.v4p0(<4 x i16>, <4 x ptr>, i32 immarg, <4 x i1>)
+declare void @llvm.masked.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16>, <vscale x 4 x ptr>, i32 immarg, <vscale x 4 x i1>)
; Test gathers that can be simplified to scalar load + splat
;; Splat address and all active mask
-define <vscale x 2 x i64> @gather_nxv2i64_uniform_ptrs_all_active_mask(i64* %src) {
+define <vscale x 2 x i64> @gather_nxv2i64_uniform_ptrs_all_active_mask(ptr %src) {
; CHECK-LABEL: @gather_nxv2i64_uniform_ptrs_all_active_mask(
-; CHECK-NEXT: [[LOAD_SCALAR:%.*]] = load i64, i64* [[SRC:%.*]], align 8
+; CHECK-NEXT: [[LOAD_SCALAR:%.*]] = load i64, ptr [[SRC:%.*]], align 8
; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[LOAD_SCALAR]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: ret <vscale x 2 x i64> [[BROADCAST_SPLAT2]]
;
- %broadcast.splatinsert = insertelement <vscale x 2 x i64*> poison, i64 *%src, i32 0
- %broadcast.splat = shufflevector <vscale x 2 x i64*> %broadcast.splatinsert, <vscale x 2 x i64*> poison, <vscale x 2 x i32> zeroinitializer
- %res = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*> %broadcast.splat, i32 8, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i32 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i64> undef)
+ %broadcast.splatinsert = insertelement <vscale x 2 x ptr> poison, ptr %src, i32 0
+ %broadcast.splat = shufflevector <vscale x 2 x ptr> %broadcast.splatinsert, <vscale x 2 x ptr> poison, <vscale x 2 x i32> zeroinitializer
+ %res = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %broadcast.splat, i32 8, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i32 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i64> undef)
ret <vscale x 2 x i64> %res
}
-define <2 x i64> @gather_v2i64_uniform_ptrs_all_active_mask(i64* %src) {
+define <2 x i64> @gather_v2i64_uniform_ptrs_all_active_mask(ptr %src) {
; CHECK-LABEL: @gather_v2i64_uniform_ptrs_all_active_mask(
-; CHECK-NEXT: [[LOAD_SCALAR:%.*]] = load i64, i64* [[SRC:%.*]], align 8
+; CHECK-NEXT: [[LOAD_SCALAR:%.*]] = load i64, ptr [[SRC:%.*]], align 8
; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <2 x i64> poison, i64 [[LOAD_SCALAR]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT1]], <2 x i64> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: ret <2 x i64> [[BROADCAST_SPLAT2]]
;
- %broadcast.splatinsert = insertelement <2 x i64*> poison, i64 *%src, i32 0
- %broadcast.splat = shufflevector <2 x i64*> %broadcast.splatinsert, <2 x i64*> poison, <2 x i32> zeroinitializer
- %res = call <2 x i64> @llvm.masked.gather.v2i64(<2 x i64*> %broadcast.splat, i32 8, <2 x i1> <i1 1, i1 1>, <2 x i64> undef)
+ %broadcast.splatinsert = insertelement <2 x ptr> poison, ptr %src, i32 0
+ %broadcast.splat = shufflevector <2 x ptr> %broadcast.splatinsert, <2 x ptr> poison, <2 x i32> zeroinitializer
+ %res = call <2 x i64> @llvm.masked.gather.v2i64(<2 x ptr> %broadcast.splat, i32 8, <2 x i1> <i1 1, i1 1>, <2 x i64> undef)
ret <2 x i64> %res
}
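; A minimal sketch of the gather scalarization above (added for exposition,
; not from the original file): splat pointers plus an all-active mask mean
; every lane loads the same value, so one scalar load is re-splatted:
;   %s = load i64, ptr %src, align 8
;   %ins = insertelement <2 x i64> poison, i64 %s, i64 0
;   %res = shufflevector <2 x i64> %ins, <2 x i64> poison, <2 x i32> zeroinitializer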
; Negative gather tests
;; Vector of pointers is not a splat.
-define <2 x i64> @negative_gather_v2i64_non_uniform_ptrs_all_active_mask(<2 x i64*> %inVal, i64* %src ) {
+define <2 x i64> @negative_gather_v2i64_non_uniform_ptrs_all_active_mask(<2 x ptr> %inVal, ptr %src ) {
; CHECK-LABEL: @negative_gather_v2i64_non_uniform_ptrs_all_active_mask(
-; CHECK-NEXT: [[INSERT_VALUE:%.*]] = insertelement <2 x i64*> [[INVAL:%.*]], i64* [[SRC:%.*]], i64 1
-; CHECK-NEXT: [[RES:%.*]] = call <2 x i64> @llvm.masked.gather.v2i64.v2p0i64(<2 x i64*> [[INSERT_VALUE]], i32 8, <2 x i1> <i1 true, i1 true>, <2 x i64> undef)
+; CHECK-NEXT: [[INSERT_VALUE:%.*]] = insertelement <2 x ptr> [[INVAL:%.*]], ptr [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[RES:%.*]] = call <2 x i64> @llvm.masked.gather.v2i64.v2p0(<2 x ptr> [[INSERT_VALUE]], i32 8, <2 x i1> <i1 true, i1 true>, <2 x i64> undef)
; CHECK-NEXT: ret <2 x i64> [[RES]]
;
- %insert.value = insertelement <2 x i64*> %inVal, i64 *%src, i32 1
- %res = call <2 x i64> @llvm.masked.gather.v2i64(<2 x i64*> %insert.value, i32 8, <2 x i1><i1 1, i1 1>, <2 x i64> undef)
+ %insert.value = insertelement <2 x ptr> %inVal, ptr %src, i32 1
+ %res = call <2 x i64> @llvm.masked.gather.v2i64(<2 x ptr> %insert.value, i32 8, <2 x i1><i1 1, i1 1>, <2 x i64> undef)
ret <2 x i64> %res
}
;; Unknown mask value
-define <2 x i64> @negative_gather_v2i64_uniform_ptrs_no_all_active_mask(i64* %src, <2 x i1> %mask) {
+define <2 x i64> @negative_gather_v2i64_uniform_ptrs_no_all_active_mask(ptr %src, <2 x i1> %mask) {
; CHECK-LABEL: @negative_gather_v2i64_uniform_ptrs_no_all_active_mask(
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64*> poison, i64* [[SRC:%.*]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64*> [[BROADCAST_SPLATINSERT]], <2 x i64*> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[RES:%.*]] = call <2 x i64> @llvm.masked.gather.v2i64.v2p0i64(<2 x i64*> [[BROADCAST_SPLAT]], i32 8, <2 x i1> [[MASK:%.*]], <2 x i64> undef)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x ptr> poison, ptr [[SRC:%.*]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x ptr> [[BROADCAST_SPLATINSERT]], <2 x ptr> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[RES:%.*]] = call <2 x i64> @llvm.masked.gather.v2i64.v2p0(<2 x ptr> [[BROADCAST_SPLAT]], i32 8, <2 x i1> [[MASK:%.*]], <2 x i64> undef)
; CHECK-NEXT: ret <2 x i64> [[RES]]
;
- %broadcast.splatinsert = insertelement <2 x i64*> poison, i64 *%src, i32 0
- %broadcast.splat = shufflevector <2 x i64*> %broadcast.splatinsert, <2 x i64*> poison, <2 x i32> zeroinitializer
- %res = call <2 x i64> @llvm.masked.gather.v2i64(<2 x i64*> %broadcast.splat, i32 8, <2 x i1> %mask, <2 x i64> undef)
+ %broadcast.splatinsert = insertelement <2 x ptr> poison, ptr %src, i32 0
+ %broadcast.splat = shufflevector <2 x ptr> %broadcast.splatinsert, <2 x ptr> poison, <2 x i32> zeroinitializer
+ %res = call <2 x i64> @llvm.masked.gather.v2i64(<2 x ptr> %broadcast.splat, i32 8, <2 x i1> %mask, <2 x i64> undef)
ret <2 x i64> %res
}
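; Note (not from the original file): with an unknown mask the splat-pointer
; gather cannot become a scalar load, because all lanes might be inactive at
; run time and %src is not known dereferenceable, so an unconditional load
; would be introduced where none may execute.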
; Function Attrs:
-declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
-declare <2 x i64> @llvm.masked.gather.v2i64(<2 x i64*>, i32, <2 x i1>, <2 x i64>)
+declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <2 x i64> @llvm.masked.gather.v2i64(<2 x ptr>, i32, <2 x i1>, <2 x i64>)
@g0 = global <4 x i32> zeroinitializer, align 16
-define inreg <4 x i32> @mload1(<4 x i32>* nocapture readonly %a0) #0 {
+define inreg <4 x i32> @mload1(ptr nocapture readonly %a0) #0 {
; CHECK-LABEL: @mload1(
; CHECK-NEXT: b0:
-; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <4 x i32>, <4 x i32>* [[A0:%.*]], align 16, !tbaa [[TBAA0:![0-9]+]]
+; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <4 x i32>, ptr [[A0:%.*]], align 16, !tbaa [[TBAA0:![0-9]+]]
; CHECK-NEXT: ret <4 x i32> [[UNMASKEDLOAD]]
;
b0:
- %v0 = call <4 x i32> @llvm.masked.load.v4i1.p0v4i1(<4 x i32>* %a0, i32 16, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef), !tbaa !0
+ %v0 = call <4 x i32> @llvm.masked.load.v4i1.p0(ptr %a0, i32 16, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef), !tbaa !0
ret <4 x i32> %v0
}
define inreg <4 x i32> @mload2() #0 {
; CHECK-LABEL: @mload2(
; CHECK-NEXT: b0:
-; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <4 x i32>, <4 x i32>* @g0, align 16, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <4 x i32>, ptr @g0, align 16, !tbaa [[TBAA0]]
; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> [[UNMASKEDLOAD]], i32 0, i64 0
; CHECK-NEXT: ret <4 x i32> [[TMP0]]
;
b0:
- %v0 = call <4 x i32> @llvm.masked.load.v4i1.p0v4i1(<4 x i32>* @g0, i32 16, <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x i32> zeroinitializer), !tbaa !0
+ %v0 = call <4 x i32> @llvm.masked.load.v4i1.p0(ptr @g0, i32 16, <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x i32> zeroinitializer), !tbaa !0
ret <4 x i32> %v0
}
-define void @mstore(<4 x i32> %a0, <4 x i32>* nocapture readonly %a1) #0 {
+define void @mstore(<4 x i32> %a0, ptr nocapture readonly %a1) #0 {
; CHECK-LABEL: @mstore(
; CHECK-NEXT: b0:
-; CHECK-NEXT: store <4 x i32> [[A0:%.*]], <4 x i32>* [[A1:%.*]], align 16, !tbaa [[TBAA0]]
+; CHECK-NEXT: store <4 x i32> [[A0:%.*]], ptr [[A1:%.*]], align 16, !tbaa [[TBAA0]]
; CHECK-NEXT: ret void
;
b0:
- call void @llvm.masked.store.v4i1.p0v4i1(<4 x i32> %a0, <4 x i32>* %a1, i32 16, <4 x i1> <i1 true, i1 true, i1 true, i1 true>), !tbaa !0
+ call void @llvm.masked.store.v4i1.p0(<4 x i32> %a0, ptr %a1, i32 16, <4 x i1> <i1 true, i1 true, i1 true, i1 true>), !tbaa !0
ret void
}
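; Note (not from the original file): the point of the three tests above is
; that when an all-true masked load/store is lowered to a plain load/store,
; the !tbaa metadata is carried over rather than dropped.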
attributes #0 = { norecurse nounwind }
-declare <4 x i32> @llvm.masked.load.v4i1.p0v4i1(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
-declare void @llvm.masked.store.v4i1.p0v4i1(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
+declare <4 x i32> @llvm.masked.load.v4i1.p0(ptr, i32, <4 x i1>, <4 x i32>)
+declare void @llvm.masked.store.v4i1.p0(<4 x i32>, ptr, i32, <4 x i1>)
!0 = !{!1, !1, i64 0}
!1 = !{!"omnipotent char", !2, i64 0}
define float @reduce_precision_multi_use_0(float %x, float %y) {
; CHECK-LABEL: @reduce_precision_multi_use_0(
; CHECK-NEXT: [[X_EXT:%.*]] = fpext float [[X:%.*]] to double
; CHECK-NEXT: [[Y_EXT:%.*]] = fpext float [[Y:%.*]] to double
-; CHECK-NEXT: store double [[X_EXT]], double* undef, align 8
+; CHECK-NEXT: store double [[X_EXT]], ptr undef, align 8
; CHECK-NEXT: [[MAXNUM:%.*]] = call double @llvm.maxnum.f64(double [[X_EXT]], double [[Y_EXT]])
; CHECK-NEXT: [[TRUNC:%.*]] = fptrunc double [[MAXNUM]] to float
; CHECK-NEXT: ret float [[TRUNC]]
;
%x.ext = fpext float %x to double
%y.ext = fpext float %y to double
- store double %x.ext, double* undef
+ store double %x.ext, ptr undef
%maxnum = call double @llvm.maxnum.f64(double %x.ext, double %y.ext)
%trunc = fptrunc double %maxnum to float
ret float %trunc
}
define float @reduce_precision_multi_use_1(float %x, float %y) {
; CHECK-LABEL: @reduce_precision_multi_use_1(
; CHECK-NEXT: [[X_EXT:%.*]] = fpext float [[X:%.*]] to double
; CHECK-NEXT: [[Y_EXT:%.*]] = fpext float [[Y:%.*]] to double
-; CHECK-NEXT: store double [[Y_EXT]], double* undef, align 8
+; CHECK-NEXT: store double [[Y_EXT]], ptr undef, align 8
; CHECK-NEXT: [[MAXNUM:%.*]] = call double @llvm.maxnum.f64(double [[X_EXT]], double [[Y_EXT]])
; CHECK-NEXT: [[TRUNC:%.*]] = fptrunc double [[MAXNUM]] to float
; CHECK-NEXT: ret float [[TRUNC]]
;
%x.ext = fpext float %x to double
%y.ext = fpext float %y to double
- store double %y.ext, double* undef
+ store double %y.ext, ptr undef
%maxnum = call double @llvm.maxnum.f64(double %x.ext, double %y.ext)
%trunc = fptrunc double %maxnum to float
ret float %trunc
}
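; Note on the two multi-use tests above (not from the original file): the
; usual narrowing,
;   fptrunc (call double @llvm.maxnum.f64(fpext %x, fpext %y)) to float
;     --> call float @llvm.maxnum.f32(float %x, float %y)
; is only applied when the fpext values have no other uses; the store keeps
; one extended value alive here, so the double-precision maxnum is retained.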
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
-declare i32 @memcmp(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture, i64)
+declare i32 @memcmp(ptr addrspace(1) nocapture, ptr addrspace(1) nocapture, i64)
-define i32 @memcmp_const_size_update_deref(i8 addrspace(1)* nocapture readonly %d, i8 addrspace(1)* nocapture readonly %s) {
+define i32 @memcmp_const_size_update_deref(ptr addrspace(1) nocapture readonly %d, ptr addrspace(1) nocapture readonly %s) {
; CHECK-LABEL: @memcmp_const_size_update_deref(
-; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8 addrspace(1)* noundef dereferenceable(16) dereferenceable_or_null(40) [[D:%.*]], i8 addrspace(1)* noundef dereferenceable(16) [[S:%.*]], i64 16)
+; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(ptr addrspace(1) noundef dereferenceable(16) dereferenceable_or_null(40) [[D:%.*]], ptr addrspace(1) noundef dereferenceable(16) [[S:%.*]], i64 16)
; CHECK-NEXT: ret i32 [[CALL]]
;
- %call = tail call i32 @memcmp(i8 addrspace(1)* dereferenceable_or_null(40) %d, i8 addrspace(1)* %s, i64 16)
+ %call = tail call i32 @memcmp(ptr addrspace(1) dereferenceable_or_null(40) %d, ptr addrspace(1) %s, i64 16)
ret i32 %call
}
-define i32 @memcmp_nonconst_size_nonnnull(i8 addrspace(1)* nocapture readonly %d, i8 addrspace(1)* nocapture readonly %s, i64 %n) {
+define i32 @memcmp_nonconst_size_nonnnull(ptr addrspace(1) nocapture readonly %d, ptr addrspace(1) nocapture readonly %s, i64 %n) {
; CHECK-LABEL: @memcmp_nonconst_size_nonnnull(
-; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8 addrspace(1)* nonnull dereferenceable_or_null(40) [[D:%.*]], i8 addrspace(1)* nonnull [[S:%.*]], i64 [[N:%.*]])
+; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(ptr addrspace(1) nonnull dereferenceable_or_null(40) [[D:%.*]], ptr addrspace(1) nonnull [[S:%.*]], i64 [[N:%.*]])
; CHECK-NEXT: ret i32 [[CALL]]
;
- %call = tail call i32 @memcmp(i8 addrspace(1)* nonnull dereferenceable_or_null(40) %d, i8 addrspace(1)* nonnull %s, i64 %n)
+ %call = tail call i32 @memcmp(ptr addrspace(1) nonnull dereferenceable_or_null(40) %d, ptr addrspace(1) nonnull %s, i64 %n)
ret i32 %call
}
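; Note on the two memcmp tests above (not from the original file): with a
; constant size both pointers must be fully accessed, so instcombine can add
; noundef and raise dereferenceable to the access size; with a runtime size
; no byte count is implied and the existing attributes are merely preserved.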
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
-declare i32 @memcmp(i8* nocapture, i8* nocapture, i64)
-declare i8* @memcpy(i8* nocapture, i8* nocapture, i64)
-declare i8* @memmove(i8* nocapture, i8* nocapture, i64)
-declare i8* @memset(i8* nocapture, i32, i64)
-declare i8* @memchr(i8* nocapture, i32, i64)
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)
-declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
-
-define i32 @memcmp_const_size_set_deref(i8* nocapture readonly %d, i8* nocapture readonly %s) {
+declare i32 @memcmp(ptr nocapture, ptr nocapture, i64)
+declare ptr @memcpy(ptr nocapture, ptr nocapture, i64)
+declare ptr @memmove(ptr nocapture, ptr nocapture, i64)
+declare ptr @memset(ptr nocapture, i32, i64)
+declare ptr @memchr(ptr nocapture, i32, i64)
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1)
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
+
+define i32 @memcmp_const_size_set_deref(ptr nocapture readonly %d, ptr nocapture readonly %s) {
; CHECK-LABEL: @memcmp_const_size_set_deref(
-; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* noundef nonnull dereferenceable(16) [[D:%.*]], i8* noundef nonnull dereferenceable(16) [[S:%.*]], i64 16)
+; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(ptr noundef nonnull dereferenceable(16) [[D:%.*]], ptr noundef nonnull dereferenceable(16) [[S:%.*]], i64 16)
; CHECK-NEXT: ret i32 [[CALL]]
;
- %call = tail call i32 @memcmp(i8* %d, i8* %s, i64 16)
+ %call = tail call i32 @memcmp(ptr %d, ptr %s, i64 16)
ret i32 %call
}
-define i32 @memcmp_const_size_update_deref(i8* nocapture readonly %d, i8* nocapture readonly %s) {
+define i32 @memcmp_const_size_update_deref(ptr nocapture readonly %d, ptr nocapture readonly %s) {
; CHECK-LABEL: @memcmp_const_size_update_deref(
-; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* noundef nonnull dereferenceable(16) [[D:%.*]], i8* noundef nonnull dereferenceable(16) [[S:%.*]], i64 16)
+; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(ptr noundef nonnull dereferenceable(16) [[D:%.*]], ptr noundef nonnull dereferenceable(16) [[S:%.*]], i64 16)
; CHECK-NEXT: ret i32 [[CALL]]
;
- %call = tail call i32 @memcmp(i8* dereferenceable(4) %d, i8* dereferenceable(8) %s, i64 16)
+ %call = tail call i32 @memcmp(ptr dereferenceable(4) %d, ptr dereferenceable(8) %s, i64 16)
ret i32 %call
}
-define i32 @memcmp_const_size_update_deref2(i8* nocapture readonly %d, i8* nocapture readonly %s) {
+define i32 @memcmp_const_size_update_deref2(ptr nocapture readonly %d, ptr nocapture readonly %s) {
; CHECK-LABEL: @memcmp_const_size_update_deref2(
-; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* noundef nonnull dereferenceable(16) [[D:%.*]], i8* noundef nonnull dereferenceable(16) [[S:%.*]], i64 16)
+; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(ptr noundef nonnull dereferenceable(16) [[D:%.*]], ptr noundef nonnull dereferenceable(16) [[S:%.*]], i64 16)
; CHECK-NEXT: ret i32 [[CALL]]
;
- %call = tail call i32 @memcmp(i8* %d, i8* dereferenceable_or_null(8) %s, i64 16)
+ %call = tail call i32 @memcmp(ptr %d, ptr dereferenceable_or_null(8) %s, i64 16)
ret i32 %call
}
-define i32 @memcmp_const_size_update_deref3(i8* nocapture readonly %d, i8* nocapture readonly %s) {
+define i32 @memcmp_const_size_update_deref3(ptr nocapture readonly %d, ptr nocapture readonly %s) {
; CHECK-LABEL: @memcmp_const_size_update_deref3(
-; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* noundef nonnull dereferenceable(40) [[D:%.*]], i8* noundef nonnull dereferenceable(16) [[S:%.*]], i64 16)
+; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(ptr noundef nonnull dereferenceable(40) [[D:%.*]], ptr noundef nonnull dereferenceable(16) [[S:%.*]], i64 16)
; CHECK-NEXT: ret i32 [[CALL]]
;
- %call = tail call i32 @memcmp(i8* dereferenceable(40) %d, i8* %s, i64 16)
+ %call = tail call i32 @memcmp(ptr dereferenceable(40) %d, ptr %s, i64 16)
ret i32 %call
}
-define i32 @memcmp_const_size_update_deref4(i8* nocapture readonly %d, i8* nocapture readonly %s) {
+define i32 @memcmp_const_size_update_deref4(ptr nocapture readonly %d, ptr nocapture readonly %s) {
; CHECK-LABEL: @memcmp_const_size_update_deref4(
-; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* noundef nonnull dereferenceable(16) [[D:%.*]], i8* noundef nonnull dereferenceable(16) [[S:%.*]], i64 16)
+; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(ptr noundef nonnull dereferenceable(16) [[D:%.*]], ptr noundef nonnull dereferenceable(16) [[S:%.*]], i64 16)
; CHECK-NEXT: ret i32 [[CALL]]
;
- %call = tail call i32 @memcmp(i8* dereferenceable_or_null(16) %d, i8* %s, i64 16)
+ %call = tail call i32 @memcmp(ptr dereferenceable_or_null(16) %d, ptr %s, i64 16)
ret i32 %call
}
-define i32 @memcmp_const_size_update_deref5(i8* nocapture readonly %d, i8* nocapture readonly %s) {
+define i32 @memcmp_const_size_update_deref5(ptr nocapture readonly %d, ptr nocapture readonly %s) {
; CHECK-LABEL: @memcmp_const_size_update_deref5(
-; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* noundef nonnull dereferenceable(40) [[D:%.*]], i8* noundef nonnull dereferenceable(16) [[S:%.*]], i64 16)
+; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(ptr noundef nonnull dereferenceable(40) [[D:%.*]], ptr noundef nonnull dereferenceable(16) [[S:%.*]], i64 16)
; CHECK-NEXT: ret i32 [[CALL]]
;
- %call = tail call i32 @memcmp(i8* dereferenceable_or_null(40) %d, i8* %s, i64 16)
+ %call = tail call i32 @memcmp(ptr dereferenceable_or_null(40) %d, ptr %s, i64 16)
ret i32 %call
}
-define i32 @memcmp_const_size_update_deref6(i8* nocapture readonly %d, i8* nocapture readonly %s) null_pointer_is_valid {
+define i32 @memcmp_const_size_update_deref6(ptr nocapture readonly %d, ptr nocapture readonly %s) null_pointer_is_valid {
; CHECK-LABEL: @memcmp_const_size_update_deref6(
-; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* noundef dereferenceable(16) dereferenceable_or_null(40) [[D:%.*]], i8* noundef dereferenceable(16) [[S:%.*]], i64 16)
+; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(ptr noundef dereferenceable(16) dereferenceable_or_null(40) [[D:%.*]], ptr noundef dereferenceable(16) [[S:%.*]], i64 16)
; CHECK-NEXT: ret i32 [[CALL]]
;
- %call = tail call i32 @memcmp(i8* dereferenceable_or_null(40) %d, i8* %s, i64 16)
+ %call = tail call i32 @memcmp(ptr dereferenceable_or_null(40) %d, ptr %s, i64 16)
ret i32 %call
}
-define i32 @memcmp_const_size_update_deref7(i8* nocapture readonly %d, i8* nocapture readonly %s) null_pointer_is_valid {
+define i32 @memcmp_const_size_update_deref7(ptr nocapture readonly %d, ptr nocapture readonly %s) null_pointer_is_valid {
; CHECK-LABEL: @memcmp_const_size_update_deref7(
-; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* noundef nonnull dereferenceable(40) [[D:%.*]], i8* noundef dereferenceable(16) [[S:%.*]], i64 16)
+; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(ptr noundef nonnull dereferenceable(40) [[D:%.*]], ptr noundef dereferenceable(16) [[S:%.*]], i64 16)
; CHECK-NEXT: ret i32 [[CALL]]
;
- %call = tail call i32 @memcmp(i8* nonnull dereferenceable_or_null(40) %d, i8* %s, i64 16)
+ %call = tail call i32 @memcmp(ptr nonnull dereferenceable_or_null(40) %d, ptr %s, i64 16)
ret i32 %call
}
-define i32 @memcmp_const_size_no_update_deref(i8* nocapture readonly %d, i8* nocapture readonly %s) {
+define i32 @memcmp_const_size_no_update_deref(ptr nocapture readonly %d, ptr nocapture readonly %s) {
; CHECK-LABEL: @memcmp_const_size_no_update_deref(
-; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* noundef nonnull dereferenceable(40) [[D:%.*]], i8* noundef nonnull dereferenceable(20) [[S:%.*]], i64 16)
+; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(ptr noundef nonnull dereferenceable(40) [[D:%.*]], ptr noundef nonnull dereferenceable(20) [[S:%.*]], i64 16)
; CHECK-NEXT: ret i32 [[CALL]]
;
- %call = tail call i32 @memcmp(i8* dereferenceable(40) %d, i8* dereferenceable(20) %s, i64 16)
+ %call = tail call i32 @memcmp(ptr dereferenceable(40) %d, ptr dereferenceable(20) %s, i64 16)
ret i32 %call
}
-define i32 @memcmp_nonconst_size(i8* nocapture readonly %d, i8* nocapture readonly %s, i64 %n) {
+define i32 @memcmp_nonconst_size(ptr nocapture readonly %d, ptr nocapture readonly %s, i64 %n) {
; CHECK-LABEL: @memcmp_nonconst_size(
-; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[D:%.*]], i8* [[S:%.*]], i64 [[N:%.*]])
+; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(ptr [[D:%.*]], ptr [[S:%.*]], i64 [[N:%.*]])
; CHECK-NEXT: ret i32 [[CALL]]
;
- %call = tail call i32 @memcmp(i8* %d, i8* %s, i64 %n)
+ %call = tail call i32 @memcmp(ptr %d, ptr %s, i64 %n)
ret i32 %call
}
-define i8* @memcpy_const_size_set_deref(i8* nocapture readonly %d, i8* nocapture readonly %s) {
+define ptr @memcpy_const_size_set_deref(ptr nocapture readonly %d, ptr nocapture readonly %s) {
; CHECK-LABEL: @memcpy_const_size_set_deref(
-; CHECK-NEXT: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 1 dereferenceable(64) [[D:%.*]], i8* noundef nonnull align 1 dereferenceable(64) [[S:%.*]], i64 64, i1 false)
-; CHECK-NEXT: ret i8* [[D]]
+; CHECK-NEXT: tail call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 1 dereferenceable(64) [[D:%.*]], ptr noundef nonnull align 1 dereferenceable(64) [[S:%.*]], i64 64, i1 false)
+; CHECK-NEXT: ret ptr [[D]]
;
- %call = tail call i8* @memcpy(i8* %d, i8* %s, i64 64)
- ret i8* %call
+ %call = tail call ptr @memcpy(ptr %d, ptr %s, i64 64)
+ ret ptr %call
}
-define i8* @memmove_const_size_set_deref(i8* nocapture readonly %d, i8* nocapture readonly %s) {
+define ptr @memmove_const_size_set_deref(ptr nocapture readonly %d, ptr nocapture readonly %s) {
; CHECK-LABEL: @memmove_const_size_set_deref(
-; CHECK-NEXT: tail call void @llvm.memmove.p0i8.p0i8.i64(i8* noundef nonnull align 1 dereferenceable(64) [[D:%.*]], i8* noundef nonnull align 1 dereferenceable(64) [[S:%.*]], i64 64, i1 false)
-; CHECK-NEXT: ret i8* [[D]]
+; CHECK-NEXT: tail call void @llvm.memmove.p0.p0.i64(ptr noundef nonnull align 1 dereferenceable(64) [[D:%.*]], ptr noundef nonnull align 1 dereferenceable(64) [[S:%.*]], i64 64, i1 false)
+; CHECK-NEXT: ret ptr [[D]]
;
- %call = tail call i8* @memmove(i8* %d, i8* %s, i64 64)
- ret i8* %call
+ %call = tail call ptr @memmove(ptr %d, ptr %s, i64 64)
+ ret ptr %call
}
-define i8* @memset_const_size_set_deref(i8* nocapture readonly %s, i32 %c) {
+define ptr @memset_const_size_set_deref(ptr nocapture readonly %s, i32 %c) {
; CHECK-LABEL: @memset_const_size_set_deref(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[C:%.*]] to i8
-; CHECK-NEXT: tail call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 1 dereferenceable(64) [[S:%.*]], i8 [[TMP1]], i64 64, i1 false)
-; CHECK-NEXT: ret i8* [[S]]
+; CHECK-NEXT: tail call void @llvm.memset.p0.i64(ptr noundef nonnull align 1 dereferenceable(64) [[S:%.*]], i8 [[TMP1]], i64 64, i1 false)
+; CHECK-NEXT: ret ptr [[S]]
;
- %call = tail call i8* @memset(i8* %s, i32 %c, i64 64)
- ret i8* %call
+ %call = tail call ptr @memset(ptr %s, i32 %c, i64 64)
+ ret ptr %call
}
-define i8* @memchr_const_size_set_deref(i8* nocapture readonly %s, i32 %c) {
+define ptr @memchr_const_size_set_deref(ptr nocapture readonly %s, i32 %c) {
; CHECK-LABEL: @memchr_const_size_set_deref(
-; CHECK-NEXT: [[CALL:%.*]] = tail call i8* @memchr(i8* noundef nonnull dereferenceable(1) [[S:%.*]], i32 [[C:%.*]], i64 64)
-; CHECK-NEXT: ret i8* [[CALL]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @memchr(ptr noundef nonnull dereferenceable(1) [[S:%.*]], i32 [[C:%.*]], i64 64)
+; CHECK-NEXT: ret ptr [[CALL]]
;
- %call = tail call i8* @memchr(i8* %s, i32 %c, i64 64)
- ret i8* %call
+ %call = tail call ptr @memchr(ptr %s, i32 %c, i64 64)
+ ret ptr %call
}
-define i8* @llvm_memcpy_const_size_set_deref(i8* nocapture readonly %d, i8* nocapture readonly %s) {
+define ptr @llvm_memcpy_const_size_set_deref(ptr nocapture readonly %d, ptr nocapture readonly %s) {
; CHECK-LABEL: @llvm_memcpy_const_size_set_deref(
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 1 dereferenceable(16) [[D:%.*]], i8* noundef nonnull align 1 dereferenceable(16) [[S:%.*]], i64 16, i1 false)
-; CHECK-NEXT: ret i8* [[D]]
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 1 dereferenceable(16) [[D:%.*]], ptr noundef nonnull align 1 dereferenceable(16) [[S:%.*]], i64 16, i1 false)
+; CHECK-NEXT: ret ptr [[D]]
;
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %d, i8* align 1 %s, i64 16, i1 false)
- ret i8* %d
+ call void @llvm.memcpy.p0.p0.i64(ptr align 1 %d, ptr align 1 %s, i64 16, i1 false)
+ ret ptr %d
}
-define i8* @llvm_memmove_const_size_set_deref(i8* nocapture readonly %d, i8* nocapture readonly %s) {
+define ptr @llvm_memmove_const_size_set_deref(ptr nocapture readonly %d, ptr nocapture readonly %s) {
; CHECK-LABEL: @llvm_memmove_const_size_set_deref(
-; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* noundef nonnull align 1 dereferenceable(16) [[D:%.*]], i8* noundef nonnull align 1 dereferenceable(16) [[S:%.*]], i64 16, i1 false)
-; CHECK-NEXT: ret i8* [[D]]
+; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr noundef nonnull align 1 dereferenceable(16) [[D:%.*]], ptr noundef nonnull align 1 dereferenceable(16) [[S:%.*]], i64 16, i1 false)
+; CHECK-NEXT: ret ptr [[D]]
;
- call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %d, i8* align 1 %s, i64 16, i1 false)
- ret i8* %d
+ call void @llvm.memmove.p0.p0.i64(ptr align 1 %d, ptr align 1 %s, i64 16, i1 false)
+ ret ptr %d
}
-define i8* @llvm_memset_const_size_set_deref(i8* nocapture readonly %s, i8 %c) {
+define ptr @llvm_memset_const_size_set_deref(ptr nocapture readonly %s, i8 %c) {
; CHECK-LABEL: @llvm_memset_const_size_set_deref(
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 1 dereferenceable(16) [[S:%.*]], i8 [[C:%.*]], i64 16, i1 false)
-; CHECK-NEXT: ret i8* [[S]]
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 1 dereferenceable(16) [[S:%.*]], i8 [[C:%.*]], i64 16, i1 false)
+; CHECK-NEXT: ret ptr [[S]]
;
- call void @llvm.memset.p0i8.i64(i8* align 1 %s, i8 %c, i64 16, i1 false)
- ret i8* %s
+ call void @llvm.memset.p0.i64(ptr align 1 %s, i8 %c, i64 16, i1 false)
+ ret ptr %s
}
@f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
@f.b = private unnamed_addr constant [1 x i32] [i32 55], align 4
-@f.c = linkonce unnamed_addr alias [1 x i32], [1 x i32]* @f.b
+@f.c = linkonce unnamed_addr alias [1 x i32], ptr @f.b
define signext i32 @test1(i32 signext %x) #0 {
; CHECK-LABEL: @test1(
; CHECK-NEXT: ret i32 12
;
%idxprom = sext i32 %x to i64
- %arrayidx = getelementptr inbounds [1 x i32], [1 x i32]* @f.a, i64 0, i64 %idxprom
- %r = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [1 x i32], ptr @f.a, i64 0, i64 %idxprom
+ %r = load i32, ptr %arrayidx, align 4
ret i32 %r
}
-declare void @foo(i64* %p)
+declare void @foo(ptr %p)
define void @test2(i32 signext %x, i64 %v) #0 {
; CHECK-LABEL: @test2(
; CHECK-NEXT: [[P:%.*]] = alloca i64, align 8
-; CHECK-NEXT: store i64 [[V:%.*]], i64* [[P]], align 8
-; CHECK-NEXT: call void @foo(i64* nonnull [[P]]) #1
+; CHECK-NEXT: store i64 [[V:%.*]], ptr [[P]], align 8
+; CHECK-NEXT: call void @foo(ptr nonnull [[P]]) #1
; CHECK-NEXT: ret void
;
%p = alloca i64
%idxprom = sext i32 %x to i64
- %arrayidx = getelementptr inbounds i64, i64* %p, i64 %idxprom
- store i64 %v, i64* %arrayidx
- call void @foo(i64* %p)
+ %arrayidx = getelementptr inbounds i64, ptr %p, i64 %idxprom
+ store i64 %v, ptr %arrayidx
+ call void @foo(ptr %p)
ret void
}
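; Why @test1 and @test2 fold: both index a single-element object through an
; inbounds GEP, so any index other than 0 would take the access out of bounds.
; The variable index can therefore be treated as 0, letting @test1 collapse to
; the constant 12 and @test2 store straight through %p, as the CHECK lines show.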
; CHECK-NEXT: ret i32 [[R]]
;
%idxprom = sext i32 %x to i64
- %p = select i1 %y, [1 x i32]* @f.a, [1 x i32]* @f.b
- %arrayidx = getelementptr inbounds [1 x i32], [1 x i32]* %p, i64 0, i64 %idxprom
- %r = load i32, i32* %arrayidx, align 4
+ %p = select i1 %y, ptr @f.a, ptr @f.b
+ %arrayidx = getelementptr inbounds [1 x i32], ptr %p, i64 0, i64 %idxprom
+ %r = load i32, ptr %arrayidx, align 4
ret i32 %r
}
define signext i32 @test4(i32 signext %x, i1 %y) #0 {
; CHECK-LABEL: @test4(
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[X:%.*]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1 x i32], [1 x i32]* @f.c, i64 0, i64 [[IDXPROM]]
-; CHECK-NEXT: [[R:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1 x i32], ptr @f.c, i64 0, i64 [[IDXPROM]]
+; CHECK-NEXT: [[R:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: ret i32 [[R]]
;
%idxprom = sext i32 %x to i64
- %arrayidx = getelementptr inbounds [1 x i32], [1 x i32]* @f.c, i64 0, i64 %idxprom
- %r = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [1 x i32], ptr @f.c, i64 0, i64 %idxprom
+ %r = load i32, ptr %arrayidx, align 4
ret i32 %r
}
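; Note on @test4: unlike @f.a and @f.b, @f.c is a linkonce alias whose
; definition may be replaced at link time, so the load cannot be constant
; folded; the CHECK lines above deliberately keep the getelementptr and load.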
; }
; CHECK: for.body:
-; CHECK: %{{.*}} = load i16, i16* %{{.*}}, align 1, !llvm.access.group !1
-; CHECK: store i16 %{{.*}}, i16* %{{.*}}, align 1, !llvm.access.group !1
+; CHECK: %{{.*}} = load i16, ptr %{{.*}}, align 1, !llvm.access.group !1
+; CHECK: store i16 %{{.*}}, ptr %{{.*}}, align 1, !llvm.access.group !1
; ModuleID = '<stdin>'
target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind uwtable
-define void @_Z4testPcl(i8* %out, i64 %size) #0 {
+define void @_Z4testPcl(ptr %out, i64 %size) #0 {
entry:
br label %for.cond
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %arrayidx = getelementptr inbounds i8, i8* %out, i64 %i.0
+ %arrayidx = getelementptr inbounds i8, ptr %out, i64 %i.0
%add = add nsw i64 %i.0, %size
- %arrayidx1 = getelementptr inbounds i8, i8* %out, i64 %add
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %arrayidx, i8* %arrayidx1, i64 2, i1 false), !llvm.access.group !4
+ %arrayidx1 = getelementptr inbounds i8, ptr %out, i64 %add
+ call void @llvm.memcpy.p0.p0.i64(ptr %arrayidx, ptr %arrayidx1, i64 2, i1 false), !llvm.access.group !4
br label %for.inc
for.inc: ; preds = %for.body
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #1
attributes #0 = { nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind }
; in equality expressions don't cause trouble and either are folded when
; they might be valid or not when they're provably undefined.
-declare i8* @memchr(i8*, i32, i64)
+declare ptr @memchr(ptr, i32, i64)
@a5 = constant [5 x i8] c"12345"
; CHECK-LABEL: @call_memchr_ap5_c_1_eq_a(
; CHECK-NEXT: ret i1
;
- %pap5 = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 5
- %qap5 = getelementptr [5 x i8], [5 x i8]* @a5, i32 1, i32 0
- %q = call i8* @memchr(i8* %pap5, i32 %c, i64 1)
- %cmp = icmp eq i8* %q, %qap5
+ %pap5 = getelementptr [5 x i8], ptr @a5, i32 0, i32 5
+ %qap5 = getelementptr [5 x i8], ptr @a5, i32 1, i32 0
+ %q = call ptr @memchr(ptr %pap5, i32 %c, i64 1)
+ %cmp = icmp eq ptr %q, %qap5
ret i1 %cmp
}
; CHECK-LABEL: @call_memchr_ap5_c_5_eq_a(
; CHECK-NEXT: ret i1
;
- %pap5 = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 5
- %qap5 = getelementptr [5 x i8], [5 x i8]* @a5, i32 1, i32 0
- %q = call i8* @memchr(i8* %pap5, i32 %c, i64 5)
- %cmp = icmp eq i8* %q, %qap5
+ %pap5 = getelementptr [5 x i8], ptr @a5, i32 0, i32 5
+ %qap5 = getelementptr [5 x i8], ptr @a5, i32 1, i32 0
+ %q = call ptr @memchr(ptr %pap5, i32 %c, i64 5)
+ %cmp = icmp eq ptr %q, %qap5
ret i1 %cmp
}
; CHECK-LABEL: @fold_memchr_ap5_c_n_eq_a(
; CHECK-NEXT: ret i1 false
;
- %pa = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 0
- %pap5 = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 5
- %q = call i8* @memchr(i8* %pap5, i32 %c, i64 %n)
- %cmp = icmp eq i8* %q, %pa
+ %pap5 = getelementptr [5 x i8], ptr @a5, i32 0, i32 5
+ %q = call ptr @memchr(ptr %pap5, i32 %c, i64 %n)
+ %cmp = icmp eq ptr %q, @a5
ret i1 %cmp
}
; CHECK-LABEL: @fold_memchr_ap5_c_n_eqz(
; CHECK-NEXT: ret i1 true
;
- %p = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 5
- %q = call i8* @memchr(i8* %p, i32 %c, i64 %n)
- %cmp = icmp eq i8* %q, null
+ %p = getelementptr [5 x i8], ptr @a5, i32 0, i32 5
+ %q = call ptr @memchr(ptr %p, i32 %c, i64 %n)
+ %cmp = icmp eq ptr %q, null
ret i1 %cmp
}
; CHECK-LABEL: @fold_memchr_a_nul_n_eqz(
; CHECK-NEXT: ret i1 true
;
- %p = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 5
- %q = call i8* @memchr(i8* %p, i32 0, i64 %n)
- %cmp = icmp eq i8* %q, null
+ %p = getelementptr [5 x i8], ptr @a5, i32 0, i32 5
+ %q = call ptr @memchr(ptr %p, i32 0, i64 %n)
+ %cmp = icmp eq ptr %q, null
ret i1 %cmp
}
; Verify that the results of memchr calls used in equality expressions
; with either the first argument or null are optimally folded.
-declare i8* @memchr(i8*, i32, i64)
+declare ptr @memchr(ptr, i32, i64)
@a5 = constant [5 x i8] c"12345"
; CHECK-NEXT: [[CHAR0CMP:%.*]] = icmp eq i8 [[TMP1]], 49
; CHECK-NEXT: ret i1 [[CHAR0CMP]]
;
- %p = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 0
- %q = call i8* @memchr(i8* %p, i32 %c, i64 5)
- %cmp = icmp eq i8* %q, %p
+ %q = call ptr @memchr(ptr @a5, i32 %c, i64 5)
+ %cmp = icmp eq ptr %q, @a5
ret i1 %cmp
}
; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i1 [[CHAR0CMP]], i1 false
; CHECK-NEXT: ret i1 [[TMP3]]
;
- %p = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 0
- %q = call i8* @memchr(i8* %p, i32 %c, i64 %n)
- %cmp = icmp eq i8* %q, %p
+ %q = call ptr @memchr(ptr @a5, i32 %c, i64 %n)
+ %cmp = icmp eq ptr %q, @a5
ret i1 %cmp
}
define i1 @call_memchr_api_c_n_eq_a(i64 %i, i32 %c, i64 %n) {
; CHECK-LABEL: @call_memchr_api_c_n_eq_a(
-; CHECK-NEXT: [[P:%.*]] = getelementptr [5 x i8], [5 x i8]* @a5, i64 0, i64 [[I:%.*]]
-; CHECK-NEXT: [[Q:%.*]] = call i8* @memchr(i8* [[P]], i32 [[C:%.*]], i64 [[N:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8* [[Q]], [[P]]
+; CHECK-NEXT: [[P:%.*]] = getelementptr [5 x i8], ptr @a5, i64 0, i64 [[I:%.*]]
+; CHECK-NEXT: [[Q:%.*]] = call ptr @memchr(ptr [[P]], i32 [[C:%.*]], i64 [[N:%.*]])
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[Q]], [[P]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %p = getelementptr [5 x i8], [5 x i8]* @a5, i64 0, i64 %i
- %q = call i8* @memchr(i8* %p, i32 %c, i64 %n)
- %cmp = icmp eq i8* %q, %p
+ %p = getelementptr [5 x i8], ptr @a5, i64 0, i64 %i
+ %q = call ptr @memchr(ptr %p, i32 %c, i64 %n)
+ %cmp = icmp eq ptr %q, %p
ret i1 %cmp
}
; Fold memchr(s, c, 15) == s to *s == c.
-define i1 @fold_memchr_s_c_15_eq_s(i8* %s, i32 %c) {
+define i1 @fold_memchr_s_c_15_eq_s(ptr %s, i32 %c) {
; CHECK-LABEL: @fold_memchr_s_c_15_eq_s(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* [[S:%.*]], align 1
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[S:%.*]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT: [[CHAR0CMP:%.*]] = icmp eq i8 [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret i1 [[CHAR0CMP]]
;
- %p = call i8* @memchr(i8* %s, i32 %c, i64 15)
- %cmp = icmp eq i8* %p, %s
+ %p = call ptr @memchr(ptr %s, i32 %c, i64 15)
+ %cmp = icmp eq ptr %p, %s
ret i1 %cmp
}
; Fold memchr(s, c, 17) != s to *s != c.
-define i1 @fold_memchr_s_c_17_neq_s(i8* %s, i32 %c) {
+define i1 @fold_memchr_s_c_17_neq_s(ptr %s, i32 %c) {
; CHECK-LABEL: @fold_memchr_s_c_17_neq_s(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* [[S:%.*]], align 1
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[S:%.*]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT: [[CHAR0CMP:%.*]] = icmp ne i8 [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret i1 [[CHAR0CMP]]
;
- %p = call i8* @memchr(i8* %s, i32 %c, i64 17)
- %cmp = icmp ne i8* %p, %s
+ %p = call ptr @memchr(ptr %s, i32 %c, i64 17)
+ %cmp = icmp ne ptr %p, %s
ret i1 %cmp
}
; Fold memchr(s, c, n) == s to *s == c for nonzero n.
-define i1 @fold_memchr_s_c_nz_eq_s(i8* %s, i32 %c, i64 %n) {
+define i1 @fold_memchr_s_c_nz_eq_s(ptr %s, i32 %c, i64 %n) {
; CHECK-LABEL: @fold_memchr_s_c_nz_eq_s(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* [[S:%.*]], align 1
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[S:%.*]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT: [[CHAR0CMP:%.*]] = icmp eq i8 [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret i1 [[CHAR0CMP]]
;
%nz = or i64 %n, 1
- %p = call i8* @memchr(i8* %s, i32 %c, i64 %nz)
- %cmp = icmp eq i8* %p, %s
+ %p = call ptr @memchr(ptr %s, i32 %c, i64 %nz)
+ %cmp = icmp eq ptr %p, %s
ret i1 %cmp
}
; be optimized to the equivalent of N && *S == C provided a short-circuiting
; AND; otherwise the load could read a byte just past the end of an array.
-define i1 @call_memchr_s_c_n_eq_s(i8* %s, i32 %c, i64 %n) {
+define i1 @call_memchr_s_c_n_eq_s(ptr %s, i32 %c, i64 %n) {
; CHECK-LABEL: @call_memchr_s_c_n_eq_s(
-; CHECK-NEXT: [[P:%.*]] = call i8* @memchr(i8* [[S:%.*]], i32 [[C:%.*]], i64 [[N:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8* [[P]], [[S]]
+; CHECK-NEXT: [[P:%.*]] = call ptr @memchr(ptr [[S:%.*]], i32 [[C:%.*]], i64 [[N:%.*]])
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[P]], [[S]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %p = call i8* @memchr(i8* %s, i32 %c, i64 %n)
- %cmp = icmp eq i8* %p, %s
+ %p = call ptr @memchr(ptr %s, i32 %c, i64 %n)
+ %cmp = icmp eq ptr %p, %s
ret i1 %cmp
}
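; A minimal C sketch of the guarded form the comment above describes (the
; helper name is illustrative, not part of the test):
;   bool first_byte_is(const char *s, int c, size_t n) {
;     return n != 0 && *(const unsigned char *)s == (unsigned char)c;
;   }
; Without the short-circuit on n, folding memchr(s, c, n) == s to *s == c
; would introduce a read of s[0] that the original call never performs.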
; Verify that the special case of memchr calls with the size of 1 are
; folded as expected.
-declare i8* @memchr(i8*, i32, i64)
+declare ptr @memchr(ptr, i32, i64)
@ax = external global [0 x i8]
@a12345 = constant [5 x i8] c"\01\02\03\04\05"
; Fold memchr(a12345, 1, 1) to a12345.
-define i8* @fold_memchr_a12345_1_1() {
+define ptr @fold_memchr_a12345_1_1() {
; CHECK-LABEL: @fold_memchr_a12345_1_1(
-; CHECK-NEXT: ret i8* getelementptr inbounds ([5 x i8], [5 x i8]* @a12345, i64 0, i64 0)
+; CHECK-NEXT: ret ptr @a12345
;
- %ptr = getelementptr [5 x i8], [5 x i8]* @a12345, i32 0, i32 0
- %res = call i8* @memchr(i8* %ptr, i32 1, i64 1)
- ret i8* %res
+ %res = call ptr @memchr(ptr @a12345, i32 1, i64 1)
+ ret ptr %res
}
; Fold memchr(a12345, 2, 1) to null.
-define i8* @fold_memchr_a12345_2_1() {
+define ptr @fold_memchr_a12345_2_1() {
; CHECK-LABEL: @fold_memchr_a12345_2_1(
-; CHECK-NEXT: ret i8* null
+; CHECK-NEXT: ret ptr null
;
- %ptr = getelementptr [5 x i8], [5 x i8]* @a12345, i32 0, i32 0
- %res = call i8* @memchr(i8* %ptr, i32 2, i64 1)
- ret i8* %res
+ %res = call ptr @memchr(ptr @a12345, i32 2, i64 1)
+ ret ptr %res
}
; Fold memchr(ax, 257, 1) to (unsigned char)*ax == 1 ? ax : null
; to verify the constant 257 is converted to unsigned char (yielding 1).
-define i8* @fold_memchr_ax_257_1(i32 %chr, i64 %n) {
+define ptr @fold_memchr_ax_257_1(i32 %chr, i64 %n) {
; CHECK-LABEL: @fold_memchr_ax_257_1(
-; CHECK-NEXT: [[MEMCHR_CHAR0:%.*]] = load i8, i8* getelementptr inbounds ([0 x i8], [0 x i8]* @ax, i64 0, i64 0), align 1
+; CHECK-NEXT: [[MEMCHR_CHAR0:%.*]] = load i8, ptr @ax, align 1
; CHECK-NEXT: [[MEMCHR_CHAR0CMP:%.*]] = icmp eq i8 [[MEMCHR_CHAR0]], 1
-; CHECK-NEXT: [[MEMCHR_SEL:%.*]] = select i1 [[MEMCHR_CHAR0CMP]], i8* getelementptr inbounds ([0 x i8], [0 x i8]* @ax, i64 0, i64 0), i8* null
-; CHECK-NEXT: ret i8* [[MEMCHR_SEL]]
+; CHECK-NEXT: [[MEMCHR_SEL:%.*]] = select i1 [[MEMCHR_CHAR0CMP]], ptr @ax, ptr null
+; CHECK-NEXT: ret ptr [[MEMCHR_SEL]]
;
- %ptr = getelementptr [0 x i8], [0 x i8]* @ax, i32 0, i32 0
- %res = call i8* @memchr(i8* %ptr, i32 257, i64 1)
- ret i8* %res
+ %res = call ptr @memchr(ptr @ax, i32 257, i64 1)
+ ret ptr %res
}
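; Worked detail for the test above: the C standard specifies that memchr
; converts its int argument to unsigned char before searching, and
; 257 & 0xff == 1, so memchr(ax, 257, 1) behaves exactly like memchr(ax, 1, 1);
; hence the icmp against the constant 1 in the CHECK lines.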
; Fold memchr(ax, c, 1) to (unsigned char)*ax == (unsigned char)c ? ax : null.
-define i8* @fold_memchr_ax_c_1(i32 %chr, i64 %n) {
+define ptr @fold_memchr_ax_c_1(i32 %chr, i64 %n) {
; CHECK-LABEL: @fold_memchr_ax_c_1(
-; CHECK-NEXT: [[MEMCHR_CHAR0:%.*]] = load i8, i8* getelementptr inbounds ([0 x i8], [0 x i8]* @ax, i64 0, i64 0), align 1
+; CHECK-NEXT: [[MEMCHR_CHAR0:%.*]] = load i8, ptr @ax, align 1
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[CHR:%.*]] to i8
; CHECK-NEXT: [[MEMCHR_CHAR0CMP:%.*]] = icmp eq i8 [[MEMCHR_CHAR0]], [[TMP1]]
-; CHECK-NEXT: [[MEMCHR_SEL:%.*]] = select i1 [[MEMCHR_CHAR0CMP]], i8* getelementptr inbounds ([0 x i8], [0 x i8]* @ax, i64 0, i64 0), i8* null
-; CHECK-NEXT: ret i8* [[MEMCHR_SEL]]
+; CHECK-NEXT: [[MEMCHR_SEL:%.*]] = select i1 [[MEMCHR_CHAR0CMP]], ptr @ax, ptr null
+; CHECK-NEXT: ret ptr [[MEMCHR_SEL]]
;
- %ptr = getelementptr [0 x i8], [0 x i8]* @ax, i32 0, i32 0
- %res = call i8* @memchr(i8* %ptr, i32 %chr, i64 1)
- ret i8* %res
+ %res = call ptr @memchr(ptr @ax, i32 %chr, i64 1)
+ ret ptr %res
}
; RUN: opt < %s -passes=instcombine -S -data-layout="E" | FileCheck %s --check-prefixes=BE
; RUN: opt < %s -passes=instcombine -S -data-layout="e" | FileCheck %s --check-prefixes=LE
-declare i8* @memchr(i8*, i32, i64)
+declare ptr @memchr(ptr, i32, i64)
; BE representation: { 'a', 'b', 'c', 'd', 'e', ..., 'p' }
; LE representation: { 'd', 'c', 'b', 'a', 'h', ..., 'm' }
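; Worked example of the layout (assuming @a, defined above this excerpt, holds
; 0x61626364 as its first i32, consistent with the bytes listed above): that
; element is stored as 'a','b','c','d' on a big-endian target but as
; 'd','c','b','a' on a little-endian one, which is why every fold below has
; two expected offsets.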
; Fold memchr(a, C, 16) for C in ['a', 'd'] U ['o', 'q'].
-define void @fold_memchr_a(i64* %pcmp) {
+define void @fold_memchr_a(ptr %pcmp) {
; BE-LABEL: @fold_memchr_a(
-; BE-NEXT: store i64 0, i64* [[PCMP:%.*]], align 4
-; BE-NEXT: [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; BE-NEXT: store i64 1, i64* [[PSTOR1]], align 4
-; BE-NEXT: [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; BE-NEXT: store i64 2, i64* [[PSTOR2]], align 4
-; BE-NEXT: [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; BE-NEXT: store i64 3, i64* [[PSTOR3]], align 4
-; BE-NEXT: [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; BE-NEXT: store i64 13, i64* [[PSTOR4]], align 4
-; BE-NEXT: [[PSTOR6:%.*]] = getelementptr i64, i64* [[PCMP]], i64 6
-; BE-NEXT: store i64 14, i64* [[PSTOR6]], align 4
-; BE-NEXT: [[PSTOR7:%.*]] = getelementptr i64, i64* [[PCMP]], i64 7
-; BE-NEXT: store i64 15, i64* [[PSTOR7]], align 4
-; BE-NEXT: [[PSTOR8:%.*]] = getelementptr i64, i64* [[PCMP]], i64 8
-; BE-NEXT: store i64 0, i64* [[PSTOR8]], align 4
+; BE-NEXT: store i64 0, ptr [[PCMP:%.*]], align 4
+; BE-NEXT: [[PSTOR1:%.*]] = getelementptr i64, ptr [[PCMP]], i64 1
+; BE-NEXT: store i64 1, ptr [[PSTOR1]], align 4
+; BE-NEXT: [[PSTOR2:%.*]] = getelementptr i64, ptr [[PCMP]], i64 2
+; BE-NEXT: store i64 2, ptr [[PSTOR2]], align 4
+; BE-NEXT: [[PSTOR3:%.*]] = getelementptr i64, ptr [[PCMP]], i64 3
+; BE-NEXT: store i64 3, ptr [[PSTOR3]], align 4
+; BE-NEXT: [[PSTOR4:%.*]] = getelementptr i64, ptr [[PCMP]], i64 4
+; BE-NEXT: store i64 13, ptr [[PSTOR4]], align 4
+; BE-NEXT: [[PSTOR6:%.*]] = getelementptr i64, ptr [[PCMP]], i64 6
+; BE-NEXT: store i64 14, ptr [[PSTOR6]], align 4
+; BE-NEXT: [[PSTOR7:%.*]] = getelementptr i64, ptr [[PCMP]], i64 7
+; BE-NEXT: store i64 15, ptr [[PSTOR7]], align 4
+; BE-NEXT: [[PSTOR8:%.*]] = getelementptr i64, ptr [[PCMP]], i64 8
+; BE-NEXT: store i64 0, ptr [[PSTOR8]], align 4
; BE-NEXT: ret void
;
; LE-LABEL: @fold_memchr_a(
-; LE-NEXT: store i64 3, i64* [[PCMP:%.*]], align 4
-; LE-NEXT: [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; LE-NEXT: store i64 2, i64* [[PSTOR1]], align 4
-; LE-NEXT: [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; LE-NEXT: store i64 1, i64* [[PSTOR2]], align 4
-; LE-NEXT: [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; LE-NEXT: store i64 0, i64* [[PSTOR3]], align 4
-; LE-NEXT: [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; LE-NEXT: store i64 14, i64* [[PSTOR4]], align 4
-; LE-NEXT: [[PSTOR6:%.*]] = getelementptr i64, i64* [[PCMP]], i64 6
-; LE-NEXT: store i64 13, i64* [[PSTOR6]], align 4
-; LE-NEXT: [[PSTOR7:%.*]] = getelementptr i64, i64* [[PCMP]], i64 7
-; LE-NEXT: store i64 12, i64* [[PSTOR7]], align 4
-; LE-NEXT: [[PSTOR8:%.*]] = getelementptr i64, i64* [[PCMP]], i64 8
-; LE-NEXT: store i64 0, i64* [[PSTOR8]], align 4
+; LE-NEXT: store i64 3, ptr [[PCMP:%.*]], align 4
+; LE-NEXT: [[PSTOR1:%.*]] = getelementptr i64, ptr [[PCMP]], i64 1
+; LE-NEXT: store i64 2, ptr [[PSTOR1]], align 4
+; LE-NEXT: [[PSTOR2:%.*]] = getelementptr i64, ptr [[PCMP]], i64 2
+; LE-NEXT: store i64 1, ptr [[PSTOR2]], align 4
+; LE-NEXT: [[PSTOR3:%.*]] = getelementptr i64, ptr [[PCMP]], i64 3
+; LE-NEXT: store i64 0, ptr [[PSTOR3]], align 4
+; LE-NEXT: [[PSTOR4:%.*]] = getelementptr i64, ptr [[PCMP]], i64 4
+; LE-NEXT: store i64 14, ptr [[PSTOR4]], align 4
+; LE-NEXT: [[PSTOR6:%.*]] = getelementptr i64, ptr [[PCMP]], i64 6
+; LE-NEXT: store i64 13, ptr [[PSTOR6]], align 4
+; LE-NEXT: [[PSTOR7:%.*]] = getelementptr i64, ptr [[PCMP]], i64 7
+; LE-NEXT: store i64 12, ptr [[PSTOR7]], align 4
+; LE-NEXT: [[PSTOR8:%.*]] = getelementptr i64, ptr [[PCMP]], i64 8
+; LE-NEXT: store i64 0, ptr [[PSTOR8]], align 4
; LE-NEXT: ret void
;
- %p0 = getelementptr [4 x i32], [4 x i32]* @a, i64 0, i64 0
- %p1 = bitcast i32* %p0 to i8*
- %ip0 = ptrtoint [4 x i32]* @a to i64
+ %ip0 = ptrtoint ptr @a to i64
; Fold memchr(a, 'a', 16) - a to 0 (3 in LE).
- %pa = call i8* @memchr(i8* %p1, i32 97, i64 16)
- %ipa = ptrtoint i8* %pa to i64
+ %pa = call ptr @memchr(ptr @a, i32 97, i64 16)
+ %ipa = ptrtoint ptr %pa to i64
%offa = sub i64 %ipa, %ip0
- %pstor0 = getelementptr i64, i64* %pcmp, i64 0
- store i64 %offa, i64* %pstor0
+ store i64 %offa, ptr %pcmp
; Fold memchr(a, 'b', 16) - a to 1 (2 in LE)
- %pb = call i8* @memchr(i8* %p1, i32 98, i64 16)
- %ipb = ptrtoint i8* %pb to i64
+ %pb = call ptr @memchr(ptr @a, i32 98, i64 16)
+ %ipb = ptrtoint ptr %pb to i64
%offb = sub i64 %ipb, %ip0
- %pstor1 = getelementptr i64, i64* %pcmp, i64 1
- store i64 %offb, i64* %pstor1
+ %pstor1 = getelementptr i64, ptr %pcmp, i64 1
+ store i64 %offb, ptr %pstor1
; Fold memchr(a, 'c', 16) - a to 2 (1 in LE)
- %pc = call i8* @memchr(i8* %p1, i32 99, i64 16)
- %ipc = ptrtoint i8* %pc to i64
+ %pc = call ptr @memchr(ptr @a, i32 99, i64 16)
+ %ipc = ptrtoint ptr %pc to i64
%offc = sub i64 %ipc, %ip0
- %pstor2 = getelementptr i64, i64* %pcmp, i64 2
- store i64 %offc, i64* %pstor2
+ %pstor2 = getelementptr i64, ptr %pcmp, i64 2
+ store i64 %offc, ptr %pstor2
; Fold memchr(a, 'd', 16) - a to 3 (0 in LE)
- %pd = call i8* @memchr(i8* %p1, i32 100, i64 16)
- %ipd = ptrtoint i8* %pd to i64
+ %pd = call ptr @memchr(ptr @a, i32 100, i64 16)
+ %ipd = ptrtoint ptr %pd to i64
%offd = sub i64 %ipd, %ip0
- %pstor3 = getelementptr i64, i64* %pcmp, i64 3
- store i64 %offd, i64* %pstor3
+ %pstor3 = getelementptr i64, ptr %pcmp, i64 3
+ store i64 %offd, ptr %pstor3
; Fold memchr(a, 'n', 16) - a to 13 (14 in LE)
- %pn = call i8* @memchr(i8* %p1, i32 110, i64 16)
- %ipn = ptrtoint i8* %pn to i64
+ %pn = call ptr @memchr(ptr @a, i32 110, i64 16)
+ %ipn = ptrtoint ptr %pn to i64
%offn = sub i64 %ipn, %ip0
- %pstor4 = getelementptr i64, i64* %pcmp, i64 4
- store i64 %offn, i64* %pstor4
+ %pstor4 = getelementptr i64, ptr %pcmp, i64 4
+ store i64 %offn, ptr %pstor4
; Fold memchr(a, 'o', 16) - a to 14 (13 in LE)
- %po = call i8* @memchr(i8* %p1, i32 111, i64 16)
- %ipo = ptrtoint i8* %po to i64
+ %po = call ptr @memchr(ptr @a, i32 111, i64 16)
+ %ipo = ptrtoint ptr %po to i64
%offo = sub i64 %ipo, %ip0
- %pstor6 = getelementptr i64, i64* %pcmp, i64 6
- store i64 %offo, i64* %pstor6
+ %pstor6 = getelementptr i64, ptr %pcmp, i64 6
+ store i64 %offo, ptr %pstor6
; Fold memchr(a, 'p', 16) - a to 15 (12 in LE)
- %pp = call i8* @memchr(i8* %p1, i32 112, i64 16)
- %ipp = ptrtoint i8* %pp to i64
+ %pp = call ptr @memchr(ptr @a, i32 112, i64 16)
+ %ipp = ptrtoint ptr %pp to i64
%offp = sub i64 %ipp, %ip0
- %pstor7 = getelementptr i64, i64* %pcmp, i64 7
- store i64 %offp, i64* %pstor7
+ %pstor7 = getelementptr i64, ptr %pcmp, i64 7
+ store i64 %offp, ptr %pstor7
; Fold memchr(a, 'q', 16) to null in both BE and LE.
- %pq = call i8* @memchr(i8* %p1, i32 113, i64 16)
- %ipq = ptrtoint i8* %pq to i64
- %pstor8 = getelementptr i64, i64* %pcmp, i64 8
- store i64 %ipq, i64* %pstor8
+ %pq = call ptr @memchr(ptr @a, i32 113, i64 16)
+ %ipq = ptrtoint ptr %pq to i64
+ %pstor8 = getelementptr i64, ptr %pcmp, i64 8
+ store i64 %ipq, ptr %pstor8
ret void
}
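; The recurring pattern in the test above, in C terms (a sketch only):
;   size_t off = (uintptr_t)memchr(a, c, 16) - (uintptr_t)a;
; i.e. each fold is checked as a byte offset from the array base. For the
; absent character 'q' the raw ptrtoint of the null result (0) is stored
; instead of an offset.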
; Fold memchr(a + 1, C, 12) for C in ['e', 'h'] U ['a', 'd'].
-define void @fold_memchr_a_p1(i64* %pcmp) {
+define void @fold_memchr_a_p1(ptr %pcmp) {
; BE-LABEL: @fold_memchr_a_p1(
-; BE-NEXT: store i64 0, i64* [[PCMP:%.*]], align 4
-; BE-NEXT: [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; BE-NEXT: store i64 1, i64* [[PSTOR1]], align 4
-; BE-NEXT: [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; BE-NEXT: store i64 2, i64* [[PSTOR2]], align 4
-; BE-NEXT: [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; BE-NEXT: store i64 3, i64* [[PSTOR3]], align 4
-; BE-NEXT: [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; BE-NEXT: store i64 0, i64* [[PSTOR4]], align 4
-; BE-NEXT: [[PSTOR5:%.*]] = getelementptr i64, i64* [[PCMP]], i64 5
-; BE-NEXT: store i64 0, i64* [[PSTOR5]], align 4
+; BE-NEXT: store i64 0, ptr [[PCMP:%.*]], align 4
+; BE-NEXT: [[PSTOR1:%.*]] = getelementptr i64, ptr [[PCMP]], i64 1
+; BE-NEXT: store i64 1, ptr [[PSTOR1]], align 4
+; BE-NEXT: [[PSTOR2:%.*]] = getelementptr i64, ptr [[PCMP]], i64 2
+; BE-NEXT: store i64 2, ptr [[PSTOR2]], align 4
+; BE-NEXT: [[PSTOR3:%.*]] = getelementptr i64, ptr [[PCMP]], i64 3
+; BE-NEXT: store i64 3, ptr [[PSTOR3]], align 4
+; BE-NEXT: [[PSTOR4:%.*]] = getelementptr i64, ptr [[PCMP]], i64 4
+; BE-NEXT: store i64 0, ptr [[PSTOR4]], align 4
+; BE-NEXT: [[PSTOR5:%.*]] = getelementptr i64, ptr [[PCMP]], i64 5
+; BE-NEXT: store i64 0, ptr [[PSTOR5]], align 4
; BE-NEXT: ret void
;
; LE-LABEL: @fold_memchr_a_p1(
-; LE-NEXT: store i64 3, i64* [[PCMP:%.*]], align 4
-; LE-NEXT: [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; LE-NEXT: store i64 2, i64* [[PSTOR1]], align 4
-; LE-NEXT: [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; LE-NEXT: store i64 1, i64* [[PSTOR2]], align 4
-; LE-NEXT: [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; LE-NEXT: store i64 0, i64* [[PSTOR3]], align 4
-; LE-NEXT: [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; LE-NEXT: store i64 0, i64* [[PSTOR4]], align 4
-; LE-NEXT: [[PSTOR5:%.*]] = getelementptr i64, i64* [[PCMP]], i64 5
-; LE-NEXT: store i64 0, i64* [[PSTOR5]], align 4
+; LE-NEXT: store i64 3, ptr [[PCMP:%.*]], align 4
+; LE-NEXT: [[PSTOR1:%.*]] = getelementptr i64, ptr [[PCMP]], i64 1
+; LE-NEXT: store i64 2, ptr [[PSTOR1]], align 4
+; LE-NEXT: [[PSTOR2:%.*]] = getelementptr i64, ptr [[PCMP]], i64 2
+; LE-NEXT: store i64 1, ptr [[PSTOR2]], align 4
+; LE-NEXT: [[PSTOR3:%.*]] = getelementptr i64, ptr [[PCMP]], i64 3
+; LE-NEXT: store i64 0, ptr [[PSTOR3]], align 4
+; LE-NEXT: [[PSTOR4:%.*]] = getelementptr i64, ptr [[PCMP]], i64 4
+; LE-NEXT: store i64 0, ptr [[PSTOR4]], align 4
+; LE-NEXT: [[PSTOR5:%.*]] = getelementptr i64, ptr [[PCMP]], i64 5
+; LE-NEXT: store i64 0, ptr [[PSTOR5]], align 4
; LE-NEXT: ret void
;
- %p0 = getelementptr [4 x i32], [4 x i32]* @a, i64 0, i64 1
- %p1 = bitcast i32* %p0 to i8*
- %ip0 = ptrtoint i8* %p1 to i64
+ %p0 = getelementptr [4 x i32], ptr @a, i64 0, i64 1
+ %ip0 = ptrtoint ptr %p0 to i64
; Fold memchr(a + 1, 'e', 12) - (a + 1) to 0 (3 in LE).
- %pe = call i8* @memchr(i8* %p1, i32 101, i64 12)
- %ipe = ptrtoint i8* %pe to i64
+ %pe = call ptr @memchr(ptr %p0, i32 101, i64 12)
+ %ipe = ptrtoint ptr %pe to i64
%offe = sub i64 %ipe, %ip0
- %pstor0 = getelementptr i64, i64* %pcmp, i64 0
- store i64 %offe, i64* %pstor0
+ store i64 %offe, ptr %pcmp
; Fold memchr(a + 1, 'f', 12) - (a + 1) to 1 (2 in LE).
- %pf = call i8* @memchr(i8* %p1, i32 102, i64 12)
- %ipf = ptrtoint i8* %pf to i64
+ %pf = call ptr @memchr(ptr %p0, i32 102, i64 12)
+ %ipf = ptrtoint ptr %pf to i64
%offf = sub i64 %ipf, %ip0
- %pstor1 = getelementptr i64, i64* %pcmp, i64 1
- store i64 %offf, i64* %pstor1
+ %pstor1 = getelementptr i64, ptr %pcmp, i64 1
+ store i64 %offf, ptr %pstor1
; Fold memchr(a + 1, 'g', 12) - (a + 1) to 2 (1 in LE).
- %pg = call i8* @memchr(i8* %p1, i32 103, i64 12)
- %ipg = ptrtoint i8* %pg to i64
+ %pg = call ptr @memchr(ptr %p0, i32 103, i64 12)
+ %ipg = ptrtoint ptr %pg to i64
%offg = sub i64 %ipg, %ip0
- %pstor2 = getelementptr i64, i64* %pcmp, i64 2
- store i64 %offg, i64* %pstor2
+ %pstor2 = getelementptr i64, ptr %pcmp, i64 2
+ store i64 %offg, ptr %pstor2
; Fold memchr(a + 1, 'h', 12) - (a + 1) to 3 (0 in LE).
- %ph = call i8* @memchr(i8* %p1, i32 104, i64 12)
- %iph = ptrtoint i8* %ph to i64
+ %ph = call ptr @memchr(ptr %p0, i32 104, i64 12)
+ %iph = ptrtoint ptr %ph to i64
%offh = sub i64 %iph, %ip0
- %pstor3 = getelementptr i64, i64* %pcmp, i64 3
- store i64 %offh, i64* %pstor3
+ %pstor3 = getelementptr i64, ptr %pcmp, i64 3
+ store i64 %offh, ptr %pstor3
; Fold memchr(a + 1, 'a', 12) to null in both BE and LE.
- %pa = call i8* @memchr(i8* %p1, i32 97, i64 12)
- %ipa = ptrtoint i8* %pa to i64
- %pstor4 = getelementptr i64, i64* %pcmp, i64 4
- store i64 %ipa, i64* %pstor4
+ %pa = call ptr @memchr(ptr %p0, i32 97, i64 12)
+ %ipa = ptrtoint ptr %pa to i64
+ %pstor4 = getelementptr i64, ptr %pcmp, i64 4
+ store i64 %ipa, ptr %pstor4
; Fold memchr(a + 1, 'd', 12) to null in both BE and LE.
- %pd = call i8* @memchr(i8* %p1, i32 100, i64 12)
- %ipd = ptrtoint i8* %pd to i64
- %pstor5 = getelementptr i64, i64* %pcmp, i64 5
- store i64 %ipd, i64* %pstor5
+ %pd = call ptr @memchr(ptr %p0, i32 100, i64 12)
+ %ipd = ptrtoint ptr %pd to i64
+ %pstor5 = getelementptr i64, ptr %pcmp, i64 5
+ store i64 %ipd, ptr %pstor5
ret void
}
@hel = constant [4 x i8] c"hel\00"
@hello_u = constant [8 x i8] c"hello_u\00"
-declare i32 @memcmp(i8*, i8*, i32)
+declare i32 @memcmp(ptr, ptr, i32)
; Check memcmp(mem, mem, size) -> 0.
-define i32 @test_simplify1(i8* %mem, i32 %size) {
+define i32 @test_simplify1(ptr %mem, i32 %size) {
; CHECK-LABEL: @test_simplify1(
; CHECK-NEXT: ret i32 0
;
- %ret = call i32 @memcmp(i8* %mem, i8* %mem, i32 %size)
+ %ret = call i32 @memcmp(ptr %mem, ptr %mem, i32 %size)
ret i32 %ret
}
; Check memcmp(mem1, mem2, 0) -> 0.
-define i32 @test_simplify2(i8* %mem1, i8* %mem2) {
+define i32 @test_simplify2(ptr %mem1, ptr %mem2) {
; CHECK-LABEL: @test_simplify2(
; CHECK-NEXT: ret i32 0
;
- %ret = call i32 @memcmp(i8* %mem1, i8* %mem2, i32 0)
+ %ret = call i32 @memcmp(ptr %mem1, ptr %mem2, i32 0)
ret i32 %ret
}
; Check memcmp(mem1, mem2, 1) -> *(unsigned char*)mem1 - *(unsigned char*)mem2.
-define i32 @test_simplify3(i8* %mem1, i8* %mem2) {
+define i32 @test_simplify3(ptr %mem1, ptr %mem2) {
; CHECK-LABEL: @test_simplify3(
-; CHECK-NEXT: [[LHSC:%.*]] = load i8, i8* %mem1, align 1
+; CHECK-NEXT: [[LHSC:%.*]] = load i8, ptr %mem1, align 1
; CHECK-NEXT: [[LHSV:%.*]] = zext i8 [[LHSC]] to i32
-; CHECK-NEXT: [[RHSC:%.*]] = load i8, i8* %mem2, align 1
+; CHECK-NEXT: [[RHSC:%.*]] = load i8, ptr %mem2, align 1
; CHECK-NEXT: [[RHSV:%.*]] = zext i8 [[RHSC]] to i32
; CHECK-NEXT: [[CHARDIFF:%.*]] = sub nsw i32 [[LHSV]], [[RHSV]]
; CHECK-NEXT: ret i32 [[CHARDIFF]]
;
- %ret = call i32 @memcmp(i8* %mem1, i8* %mem2, i32 1)
+ %ret = call i32 @memcmp(ptr %mem1, ptr %mem2, i32 1)
ret i32 %ret
}
; CHECK-LABEL: @test_simplify4(
; CHECK-NEXT: ret i32 0
;
- %mem1 = getelementptr [4 x i8], [4 x i8]* @hel, i32 0, i32 0
- %mem2 = getelementptr [8 x i8], [8 x i8]* @hello_u, i32 0, i32 0
- %ret = call i32 @memcmp(i8* %mem1, i8* %mem2, i32 3)
+ %ret = call i32 @memcmp(ptr @hel, ptr @hello_u, i32 3)
ret i32 %ret
}
; CHECK-LABEL: @test_simplify5(
; CHECK-NEXT: ret i32 1
;
- %mem1 = getelementptr [4 x i8], [4 x i8]* @hel, i32 0, i32 0
- %mem2 = getelementptr [4 x i8], [4 x i8]* @foo, i32 0, i32 0
- %ret = call i32 @memcmp(i8* %mem1, i8* %mem2, i32 3)
+ %ret = call i32 @memcmp(ptr @hel, ptr @foo, i32 3)
ret i32 %ret
}
; CHECK-LABEL: @test_simplify6(
; CHECK-NEXT: ret i32 -1
;
- %mem1 = getelementptr [4 x i8], [4 x i8]* @foo, i32 0, i32 0
- %mem2 = getelementptr [4 x i8], [4 x i8]* @hel, i32 0, i32 0
- %ret = call i32 @memcmp(i8* %mem1, i8* %mem2, i32 3)
+ %ret = call i32 @memcmp(ptr @foo, ptr @hel, i32 3)
ret i32 %ret
}
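; For @test_simplify5 and @test_simplify6 the folder compares the constant
; bytes directly; assuming @foo (defined above this excerpt) holds "foo", the
; strings differ at index 0 with 'h' (104) vs 'f' (102), so the result is the
; sign of that difference: 1 for hel-vs-foo and -1 for foo-vs-hel.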
;
%x.addr = alloca i64, align 8
%y.addr = alloca i64, align 8
- store i64 %x, i64* %x.addr, align 8
- store i64 %y, i64* %y.addr, align 8
- %xptr = bitcast i64* %x.addr to i8*
- %yptr = bitcast i64* %y.addr to i8*
- %call = call i32 @memcmp(i8* %xptr, i8* %yptr, i32 8)
+ store i64 %x, ptr %x.addr, align 8
+ store i64 %y, ptr %y.addr, align 8
+ %call = call i32 @memcmp(ptr %x.addr, ptr %y.addr, i32 8)
%cmp = icmp eq i32 %call, 0
ret i1 %cmp
}
;
%x.addr = alloca i32, align 4
%y.addr = alloca i32, align 4
- store i32 %x, i32* %x.addr, align 4
- store i32 %y, i32* %y.addr, align 4
- %xptr = bitcast i32* %x.addr to i8*
- %yptr = bitcast i32* %y.addr to i8*
- %call = call i32 @memcmp(i8* %xptr, i8* %yptr, i32 4)
+ store i32 %x, ptr %x.addr, align 4
+ store i32 %y, ptr %y.addr, align 4
+ %call = call i32 @memcmp(ptr %x.addr, ptr %y.addr, i32 4)
%cmp = icmp eq i32 %call, 0
ret i1 %cmp
}
;
%x.addr = alloca i16, align 2
%y.addr = alloca i16, align 2
- store i16 %x, i16* %x.addr, align 2
- store i16 %y, i16* %y.addr, align 2
- %xptr = bitcast i16* %x.addr to i8*
- %yptr = bitcast i16* %y.addr to i8*
- %call = call i32 @memcmp(i8* %xptr, i8* %yptr, i32 2)
+ store i16 %x, ptr %x.addr, align 2
+ store i16 %y, ptr %y.addr, align 2
+ %call = call i32 @memcmp(ptr %x.addr, ptr %y.addr, i32 2)
%cmp = icmp eq i32 %call, 0
ret i1 %cmp
}
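; Rationale sketch for the three alloca tests above: the full width of each
; slot is stored immediately before the call, so a memcmp over exactly that
; many bytes reduces to an integer equality test, roughly
;   memcmp(&x, &y, sizeof x) == 0   ==>   x == y
; in C terms (safe here because i64/i32/i16 slots contain no padding).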
; Check memcmp(mem1, mem2, size)==0 -> bcmp(mem1, mem2, size)==0
-define i1 @test_simplify10(i8* %mem1, i8* %mem2, i32 %size) {
+define i1 @test_simplify10(ptr %mem1, ptr %mem2, i32 %size) {
; NOBCMP-LABEL: @test_simplify10(
-; NOBCMP-NEXT: [[CALL:%.*]] = call i32 @memcmp(i8* %mem1, i8* %mem2, i32 %size)
+; NOBCMP-NEXT: [[CALL:%.*]] = call i32 @memcmp(ptr %mem1, ptr %mem2, i32 %size)
; NOBCMP-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; NOBCMP-NEXT: ret i1 [[CMP]]
;
; BCMP-LABEL: @test_simplify10(
-; BCMP-NEXT: [[CALL:%.*]] = call i32 @bcmp(i8* %mem1, i8* %mem2, i32 %size)
+; BCMP-NEXT: [[CALL:%.*]] = call i32 @bcmp(ptr %mem1, ptr %mem2, i32 %size)
; BCMP-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; BCMP-NEXT: ret i1 [[CMP]]
;
- %call = call i32 @memcmp(i8* %mem1, i8* %mem2, i32 %size)
+ %call = call i32 @memcmp(ptr %mem1, ptr %mem2, i32 %size)
%cmp = icmp eq i32 %call, 0
ret i1 %cmp
}
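; Why the BCMP variant is sound (and only under a TLI that reports bcmp as
; available, which is what the two check prefixes model): the result feeds
; nothing but an equality test with 0, and bcmp promises only zero/nonzero
; rather than an ordering, so
;   memcmp(p, q, n) == 0   ==>   bcmp(p, q, n) == 0
; loses no information at this use.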
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-declare i32* @memcmp(i8*, i8*, i32)
+declare ptr @memcmp(ptr, ptr, i32)
; Check that memcmp functions with the wrong prototype aren't simplified.
-define i32* @test_no_simplify1(i8* %mem, i32 %size) {
+define ptr @test_no_simplify1(ptr %mem, i32 %size) {
; CHECK-LABEL: @test_no_simplify1(
- %ret = call i32* @memcmp(i8* %mem, i8* %mem, i32 %size)
-; CHECK-NEXT: call i32* @memcmp
- ret i32* %ret
-; CHECK-NEXT: ret i32* %ret
+ %ret = call ptr @memcmp(ptr %mem, ptr %mem, i32 %size)
+; CHECK-NEXT: call ptr @memcmp
+ ret ptr %ret
+; CHECK-NEXT: ret ptr %ret
}
; RUN: opt < %s -passes=instcombine -S -data-layout="E" | FileCheck %s --check-prefixes=BE
; RUN: opt < %s -passes=instcombine -S -data-layout="e" | FileCheck %s --check-prefixes=LE
-declare i32 @memcmp(i8*, i8*, i64)
+declare i32 @memcmp(ptr, ptr, i64)
; BE representation: { 'a', 'b', 'c', ..., 'f', 'g', 'h' }
; LE representation: { 'b', 'a', 'd', ..., 'e', 'h', 'g' }
; Fold memcmp(ia16a, i8a, N) for N in [0, 8].
-define void @fold_memcmp_ia16a_i8a(i32* %pcmp) {
+define void @fold_memcmp_ia16a_i8a(ptr %pcmp) {
; BE-LABEL: @fold_memcmp_ia16a_i8a(
-; BE-NEXT: store i32 0, i32* [[PCMP:%.*]], align 4
-; BE-NEXT: [[PSTOR1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
-; BE-NEXT: store i32 0, i32* [[PSTOR1]], align 4
-; BE-NEXT: [[PSTOR2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; BE-NEXT: store i32 0, i32* [[PSTOR2]], align 4
-; BE-NEXT: [[PSTOR3:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; BE-NEXT: store i32 0, i32* [[PSTOR3]], align 4
-; BE-NEXT: [[PSTOR4:%.*]] = getelementptr i32, i32* [[PCMP]], i64 4
-; BE-NEXT: store i32 0, i32* [[PSTOR4]], align 4
-; BE-NEXT: [[PSTOR5:%.*]] = getelementptr i32, i32* [[PCMP]], i64 5
-; BE-NEXT: store i32 0, i32* [[PSTOR5]], align 4
-; BE-NEXT: [[PSTOR6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 6
-; BE-NEXT: store i32 0, i32* [[PSTOR6]], align 4
-; BE-NEXT: [[PSTOR7:%.*]] = getelementptr i32, i32* [[PCMP]], i64 7
-; BE-NEXT: store i32 0, i32* [[PSTOR7]], align 4
-; BE-NEXT: [[PSTOR8:%.*]] = getelementptr i32, i32* [[PCMP]], i64 8
-; BE-NEXT: store i32 1, i32* [[PSTOR8]], align 4
+; BE-NEXT: store i32 0, ptr [[PCMP:%.*]], align 4
+; BE-NEXT: [[PSTOR1:%.*]] = getelementptr i32, ptr [[PCMP]], i64 1
+; BE-NEXT: store i32 0, ptr [[PSTOR1]], align 4
+; BE-NEXT: [[PSTOR2:%.*]] = getelementptr i32, ptr [[PCMP]], i64 2
+; BE-NEXT: store i32 0, ptr [[PSTOR2]], align 4
+; BE-NEXT: [[PSTOR3:%.*]] = getelementptr i32, ptr [[PCMP]], i64 3
+; BE-NEXT: store i32 0, ptr [[PSTOR3]], align 4
+; BE-NEXT: [[PSTOR4:%.*]] = getelementptr i32, ptr [[PCMP]], i64 4
+; BE-NEXT: store i32 0, ptr [[PSTOR4]], align 4
+; BE-NEXT: [[PSTOR5:%.*]] = getelementptr i32, ptr [[PCMP]], i64 5
+; BE-NEXT: store i32 0, ptr [[PSTOR5]], align 4
+; BE-NEXT: [[PSTOR6:%.*]] = getelementptr i32, ptr [[PCMP]], i64 6
+; BE-NEXT: store i32 0, ptr [[PSTOR6]], align 4
+; BE-NEXT: [[PSTOR7:%.*]] = getelementptr i32, ptr [[PCMP]], i64 7
+; BE-NEXT: store i32 0, ptr [[PSTOR7]], align 4
+; BE-NEXT: [[PSTOR8:%.*]] = getelementptr i32, ptr [[PCMP]], i64 8
+; BE-NEXT: store i32 1, ptr [[PSTOR8]], align 4
; BE-NEXT: ret void
;
; LE-LABEL: @fold_memcmp_ia16a_i8a(
-; LE-NEXT: store i32 0, i32* [[PCMP:%.*]], align 4
-; LE-NEXT: [[PSTOR1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
-; LE-NEXT: store i32 1, i32* [[PSTOR1]], align 4
-; LE-NEXT: [[PSTOR2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; LE-NEXT: store i32 1, i32* [[PSTOR2]], align 4
-; LE-NEXT: [[PSTOR3:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; LE-NEXT: store i32 1, i32* [[PSTOR3]], align 4
-; LE-NEXT: [[PSTOR4:%.*]] = getelementptr i32, i32* [[PCMP]], i64 4
-; LE-NEXT: store i32 1, i32* [[PSTOR4]], align 4
-; LE-NEXT: [[PSTOR5:%.*]] = getelementptr i32, i32* [[PCMP]], i64 5
-; LE-NEXT: store i32 1, i32* [[PSTOR5]], align 4
-; LE-NEXT: [[PSTOR6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 6
-; LE-NEXT: store i32 1, i32* [[PSTOR6]], align 4
-; LE-NEXT: [[PSTOR7:%.*]] = getelementptr i32, i32* [[PCMP]], i64 7
-; LE-NEXT: store i32 1, i32* [[PSTOR7]], align 4
-; LE-NEXT: [[PSTOR8:%.*]] = getelementptr i32, i32* [[PCMP]], i64 8
-; LE-NEXT: store i32 1, i32* [[PSTOR8]], align 4
+; LE-NEXT: store i32 0, ptr [[PCMP:%.*]], align 4
+; LE-NEXT: [[PSTOR1:%.*]] = getelementptr i32, ptr [[PCMP]], i64 1
+; LE-NEXT: store i32 1, ptr [[PSTOR1]], align 4
+; LE-NEXT: [[PSTOR2:%.*]] = getelementptr i32, ptr [[PCMP]], i64 2
+; LE-NEXT: store i32 1, ptr [[PSTOR2]], align 4
+; LE-NEXT: [[PSTOR3:%.*]] = getelementptr i32, ptr [[PCMP]], i64 3
+; LE-NEXT: store i32 1, ptr [[PSTOR3]], align 4
+; LE-NEXT: [[PSTOR4:%.*]] = getelementptr i32, ptr [[PCMP]], i64 4
+; LE-NEXT: store i32 1, ptr [[PSTOR4]], align 4
+; LE-NEXT: [[PSTOR5:%.*]] = getelementptr i32, ptr [[PCMP]], i64 5
+; LE-NEXT: store i32 1, ptr [[PSTOR5]], align 4
+; LE-NEXT: [[PSTOR6:%.*]] = getelementptr i32, ptr [[PCMP]], i64 6
+; LE-NEXT: store i32 1, ptr [[PSTOR6]], align 4
+; LE-NEXT: [[PSTOR7:%.*]] = getelementptr i32, ptr [[PCMP]], i64 7
+; LE-NEXT: store i32 1, ptr [[PSTOR7]], align 4
+; LE-NEXT: [[PSTOR8:%.*]] = getelementptr i32, ptr [[PCMP]], i64 8
+; LE-NEXT: store i32 1, ptr [[PSTOR8]], align 4
; LE-NEXT: ret void
;
- %p0 = getelementptr [4 x i16], [4 x i16]* @ia16a, i64 0, i64 0
- %p1 = bitcast i16* %p0 to i8*
- %q = getelementptr [8 x i8], [8 x i8]* @i8a, i64 0, i64 0
- %cmp0 = call i32 @memcmp(i8* %p1, i8* %q, i64 0)
- %pstor0 = getelementptr i32, i32* %pcmp, i64 0
- store i32 %cmp0, i32* %pstor0
+ %cmp0 = call i32 @memcmp(ptr @ia16a, ptr @i8a, i64 0)
+ store i32 %cmp0, ptr %pcmp
- %cmp1 = call i32 @memcmp(i8* %p1, i8* %q, i64 1)
- %pstor1 = getelementptr i32, i32* %pcmp, i64 1
- store i32 %cmp1, i32* %pstor1
+ %cmp1 = call i32 @memcmp(ptr @ia16a, ptr @i8a, i64 1)
+ %pstor1 = getelementptr i32, ptr %pcmp, i64 1
+ store i32 %cmp1, ptr %pstor1
- %cmp2 = call i32 @memcmp(i8* %p1, i8* %q, i64 2)
- %pstor2 = getelementptr i32, i32* %pcmp, i64 2
- store i32 %cmp2, i32* %pstor2
+ %cmp2 = call i32 @memcmp(ptr @ia16a, ptr @i8a, i64 2)
+ %pstor2 = getelementptr i32, ptr %pcmp, i64 2
+ store i32 %cmp2, ptr %pstor2
- %cmp3 = call i32 @memcmp(i8* %p1, i8* %q, i64 3)
- %pstor3 = getelementptr i32, i32* %pcmp, i64 3
- store i32 %cmp3, i32* %pstor3
+ %cmp3 = call i32 @memcmp(ptr @ia16a, ptr @i8a, i64 3)
+ %pstor3 = getelementptr i32, ptr %pcmp, i64 3
+ store i32 %cmp3, ptr %pstor3
- %cmp4 = call i32 @memcmp(i8* %p1, i8* %q, i64 4)
- %pstor4 = getelementptr i32, i32* %pcmp, i64 4
- store i32 %cmp4, i32* %pstor4
+ %cmp4 = call i32 @memcmp(ptr @ia16a, ptr @i8a, i64 4)
+ %pstor4 = getelementptr i32, ptr %pcmp, i64 4
+ store i32 %cmp4, ptr %pstor4
- %cmp5 = call i32 @memcmp(i8* %p1, i8* %q, i64 5)
- %pstor5 = getelementptr i32, i32* %pcmp, i64 5
- store i32 %cmp5, i32* %pstor5
+ %cmp5 = call i32 @memcmp(ptr @ia16a, ptr @i8a, i64 5)
+ %pstor5 = getelementptr i32, ptr %pcmp, i64 5
+ store i32 %cmp5, ptr %pstor5
- %cmp6 = call i32 @memcmp(i8* %p1, i8* %q, i64 6)
- %pstor6 = getelementptr i32, i32* %pcmp, i64 6
- store i32 %cmp6, i32* %pstor6
+ %cmp6 = call i32 @memcmp(ptr @ia16a, ptr @i8a, i64 6)
+ %pstor6 = getelementptr i32, ptr %pcmp, i64 6
+ store i32 %cmp6, ptr %pstor6
- %cmp7 = call i32 @memcmp(i8* %p1, i8* %q, i64 7)
- %pstor7 = getelementptr i32, i32* %pcmp, i64 7
- store i32 %cmp7, i32* %pstor7
+ %cmp7 = call i32 @memcmp(ptr @ia16a, ptr @i8a, i64 7)
+ %pstor7 = getelementptr i32, ptr %pcmp, i64 7
+ store i32 %cmp7, ptr %pstor7
- %cmp8 = call i32 @memcmp(i8* %p1, i8* %q, i64 8)
- %pstor8 = getelementptr i32, i32* %pcmp, i64 8
- store i32 %cmp8, i32* %pstor8
+ %cmp8 = call i32 @memcmp(ptr @ia16a, ptr @i8a, i64 8)
+ %pstor8 = getelementptr i32, ptr %pcmp, i64 8
+ store i32 %cmp8, ptr %pstor8
ret void
}
; Fold memcmp(ia16a + 1, i8a + 2, N) for N in [0, 6].
-define void @fold_memcmp_ia16a_p1_i8a_p1(i32* %pcmp) {
+define void @fold_memcmp_ia16a_p1_i8a_p1(ptr %pcmp) {
; BE-LABEL: @fold_memcmp_ia16a_p1_i8a_p1(
-; BE-NEXT: store i32 0, i32* [[PCMP:%.*]], align 4
-; BE-NEXT: [[PSTOR1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
-; BE-NEXT: store i32 1, i32* [[PSTOR1]], align 4
-; BE-NEXT: [[PSTOR2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; BE-NEXT: store i32 1, i32* [[PSTOR2]], align 4
-; BE-NEXT: [[PSTOR3:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; BE-NEXT: store i32 1, i32* [[PSTOR3]], align 4
-; BE-NEXT: [[PSTOR4:%.*]] = getelementptr i32, i32* [[PCMP]], i64 4
-; BE-NEXT: store i32 1, i32* [[PSTOR4]], align 4
-; BE-NEXT: [[PSTOR5:%.*]] = getelementptr i32, i32* [[PCMP]], i64 5
-; BE-NEXT: store i32 1, i32* [[PSTOR5]], align 4
-; BE-NEXT: [[PSTOR6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 6
-; BE-NEXT: store i32 1, i32* [[PSTOR6]], align 4
+; BE-NEXT: store i32 0, ptr [[PCMP:%.*]], align 4
+; BE-NEXT: [[PSTOR1:%.*]] = getelementptr i32, ptr [[PCMP]], i64 1
+; BE-NEXT: store i32 1, ptr [[PSTOR1]], align 4
+; BE-NEXT: [[PSTOR2:%.*]] = getelementptr i32, ptr [[PCMP]], i64 2
+; BE-NEXT: store i32 1, ptr [[PSTOR2]], align 4
+; BE-NEXT: [[PSTOR3:%.*]] = getelementptr i32, ptr [[PCMP]], i64 3
+; BE-NEXT: store i32 1, ptr [[PSTOR3]], align 4
+; BE-NEXT: [[PSTOR4:%.*]] = getelementptr i32, ptr [[PCMP]], i64 4
+; BE-NEXT: store i32 1, ptr [[PSTOR4]], align 4
+; BE-NEXT: [[PSTOR5:%.*]] = getelementptr i32, ptr [[PCMP]], i64 5
+; BE-NEXT: store i32 1, ptr [[PSTOR5]], align 4
+; BE-NEXT: [[PSTOR6:%.*]] = getelementptr i32, ptr [[PCMP]], i64 6
+; BE-NEXT: store i32 1, ptr [[PSTOR6]], align 4
; BE-NEXT: ret void
;
; LE-LABEL: @fold_memcmp_ia16a_p1_i8a_p1(
-; LE-NEXT: store i32 0, i32* [[PCMP:%.*]], align 4
-; LE-NEXT: [[PSTOR1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
-; LE-NEXT: store i32 1, i32* [[PSTOR1]], align 4
-; LE-NEXT: [[PSTOR2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; LE-NEXT: store i32 1, i32* [[PSTOR2]], align 4
-; LE-NEXT: [[PSTOR3:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; LE-NEXT: store i32 1, i32* [[PSTOR3]], align 4
-; LE-NEXT: [[PSTOR4:%.*]] = getelementptr i32, i32* [[PCMP]], i64 4
-; LE-NEXT: store i32 1, i32* [[PSTOR4]], align 4
-; LE-NEXT: [[PSTOR5:%.*]] = getelementptr i32, i32* [[PCMP]], i64 5
-; LE-NEXT: store i32 1, i32* [[PSTOR5]], align 4
-; LE-NEXT: [[PSTOR6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 6
-; LE-NEXT: store i32 1, i32* [[PSTOR6]], align 4
+; LE-NEXT: store i32 0, ptr [[PCMP:%.*]], align 4
+; LE-NEXT: [[PSTOR1:%.*]] = getelementptr i32, ptr [[PCMP]], i64 1
+; LE-NEXT: store i32 1, ptr [[PSTOR1]], align 4
+; LE-NEXT: [[PSTOR2:%.*]] = getelementptr i32, ptr [[PCMP]], i64 2
+; LE-NEXT: store i32 1, ptr [[PSTOR2]], align 4
+; LE-NEXT: [[PSTOR3:%.*]] = getelementptr i32, ptr [[PCMP]], i64 3
+; LE-NEXT: store i32 1, ptr [[PSTOR3]], align 4
+; LE-NEXT: [[PSTOR4:%.*]] = getelementptr i32, ptr [[PCMP]], i64 4
+; LE-NEXT: store i32 1, ptr [[PSTOR4]], align 4
+; LE-NEXT: [[PSTOR5:%.*]] = getelementptr i32, ptr [[PCMP]], i64 5
+; LE-NEXT: store i32 1, ptr [[PSTOR5]], align 4
+; LE-NEXT: [[PSTOR6:%.*]] = getelementptr i32, ptr [[PCMP]], i64 6
+; LE-NEXT: store i32 1, ptr [[PSTOR6]], align 4
; LE-NEXT: ret void
;
- %p0 = getelementptr [4 x i16], [4 x i16]* @ia16a, i64 0, i64 1
- %p1 = bitcast i16* %p0 to i8*
- %q = getelementptr [8 x i8], [8 x i8]* @i8a, i64 0, i64 1
+ %p0 = getelementptr [4 x i16], ptr @ia16a, i64 0, i64 1
+ %q = getelementptr [8 x i8], ptr @i8a, i64 0, i64 1
- %cmp0 = call i32 @memcmp(i8* %p1, i8* %q, i64 0)
- %pstor0 = getelementptr i32, i32* %pcmp, i64 0
- store i32 %cmp0, i32* %pstor0
+ %cmp0 = call i32 @memcmp(ptr %p0, ptr %q, i64 0)
+ store i32 %cmp0, ptr %pcmp
- %cmp1 = call i32 @memcmp(i8* %p1, i8* %q, i64 1)
- %pstor1 = getelementptr i32, i32* %pcmp, i64 1
- store i32 %cmp1, i32* %pstor1
+ %cmp1 = call i32 @memcmp(ptr %p0, ptr %q, i64 1)
+ %pstor1 = getelementptr i32, ptr %pcmp, i64 1
+ store i32 %cmp1, ptr %pstor1
- %cmp2 = call i32 @memcmp(i8* %p1, i8* %q, i64 2)
- %pstor2 = getelementptr i32, i32* %pcmp, i64 2
- store i32 %cmp2, i32* %pstor2
+ %cmp2 = call i32 @memcmp(ptr %p0, ptr %q, i64 2)
+ %pstor2 = getelementptr i32, ptr %pcmp, i64 2
+ store i32 %cmp2, ptr %pstor2
- %cmp3 = call i32 @memcmp(i8* %p1, i8* %q, i64 3)
- %pstor3 = getelementptr i32, i32* %pcmp, i64 3
- store i32 %cmp3, i32* %pstor3
+ %cmp3 = call i32 @memcmp(ptr %p0, ptr %q, i64 3)
+ %pstor3 = getelementptr i32, ptr %pcmp, i64 3
+ store i32 %cmp3, ptr %pstor3
- %cmp4 = call i32 @memcmp(i8* %p1, i8* %q, i64 4)
- %pstor4 = getelementptr i32, i32* %pcmp, i64 4
- store i32 %cmp4, i32* %pstor4
+ %cmp4 = call i32 @memcmp(ptr %p0, ptr %q, i64 4)
+ %pstor4 = getelementptr i32, ptr %pcmp, i64 4
+ store i32 %cmp4, ptr %pstor4
- %cmp5 = call i32 @memcmp(i8* %p1, i8* %q, i64 5)
- %pstor5 = getelementptr i32, i32* %pcmp, i64 5
- store i32 %cmp5, i32* %pstor5
+ %cmp5 = call i32 @memcmp(ptr %p0, ptr %q, i64 5)
+ %pstor5 = getelementptr i32, ptr %pcmp, i64 5
+ store i32 %cmp5, ptr %pstor5
- %cmp6 = call i32 @memcmp(i8* %p1, i8* %q, i64 6)
- %pstor6 = getelementptr i32, i32* %pcmp, i64 6
- store i32 %cmp6, i32* %pstor6
+ %cmp6 = call i32 @memcmp(ptr %p0, ptr %q, i64 6)
+ %pstor6 = getelementptr i32, ptr %pcmp, i64 6
+ store i32 %cmp6, ptr %pstor6
ret void
}
; RUN: opt < %s -passes=instcombine -S -data-layout="E" | FileCheck %s --check-prefixes=BE
; RUN: opt < %s -passes=instcombine -S -data-layout="e" | FileCheck %s --check-prefixes=LE
-declare i32 @memcmp(i8*, i8*, i64)
+declare i32 @memcmp(ptr, ptr, i64)
@ia16a = constant [4 x i16] [i16 24930, i16 25444, i16 25958, i16 26472]
@ia16b = constant [5 x i16] [i16 24930, i16 25444, i16 25958, i16 26472, i16 26992]
; Exercise folding of memcmp calls with bounds that exceed the arrays.
; Folding such calls to a consistent value (analogous to strncmp) is safer
; than letting a SIMD library implementation return a bogus value.
-define void @fold_memcmp_mismatch_too_big(i32* %pcmp) {
+define void @fold_memcmp_mismatch_too_big(ptr %pcmp) {
; BE-LABEL: @fold_memcmp_mismatch_too_big(
-; BE-NEXT: store i32 -1, i32* [[PCMP:%.*]], align 4
-; BE-NEXT: [[PSTOR_CB:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
-; BE-NEXT: store i32 1, i32* [[PSTOR_CB]], align 4
+; BE-NEXT: store i32 -1, ptr [[PCMP:%.*]], align 4
+; BE-NEXT: [[PSTOR_CB:%.*]] = getelementptr i32, ptr [[PCMP]], i64 1
+; BE-NEXT: store i32 1, ptr [[PSTOR_CB]], align 4
; BE-NEXT: ret void
;
; LE-LABEL: @fold_memcmp_mismatch_too_big(
-; LE-NEXT: store i32 -1, i32* [[PCMP:%.*]], align 4
-; LE-NEXT: [[PSTOR_CB:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
-; LE-NEXT: store i32 1, i32* [[PSTOR_CB]], align 4
+; LE-NEXT: store i32 -1, ptr [[PCMP:%.*]], align 4
+; LE-NEXT: [[PSTOR_CB:%.*]] = getelementptr i32, ptr [[PCMP]], i64 1
+; LE-NEXT: store i32 1, ptr [[PSTOR_CB]], align 4
; LE-NEXT: ret void
;
- %p0 = getelementptr [5 x i16], [5 x i16]* @ia16b, i64 0, i64 0
- %p1 = bitcast i16* %p0 to i8*
- %q0 = getelementptr [6 x i16], [6 x i16]* @ia16c, i64 0, i64 0
- %q1 = bitcast i16* %q0 to i8*
- %cmp_bc = call i32 @memcmp(i8* %p1, i8* %q1, i64 12)
- %pstor_bc = getelementptr i32, i32* %pcmp, i64 0
- store i32 %cmp_bc, i32* %pstor_bc
+ %cmp_bc = call i32 @memcmp(ptr @ia16b, ptr @ia16c, i64 12)
+ store i32 %cmp_bc, ptr %pcmp
- %cmp_cb = call i32 @memcmp(i8* %q1, i8* %p1, i64 12)
- %pstor_cb = getelementptr i32, i32* %pcmp, i64 1
- store i32 %cmp_cb, i32* %pstor_cb
+ %cmp_cb = call i32 @memcmp(ptr @ia16c, ptr @ia16b, i64 12)
+ %pstor_cb = getelementptr i32, ptr %pcmp, i64 1
+ store i32 %cmp_cb, ptr %pstor_cb
ret void
}
; As in the instances above, this is preferable to letting the undefined
; calls take place, although it does prevent sanitizers from detecting them.
-define void @fold_memcmp_match_too_big(i32* %pcmp) {
+define void @fold_memcmp_match_too_big(ptr %pcmp) {
; BE-LABEL: @fold_memcmp_match_too_big(
-; BE-NEXT: store i32 0, i32* [[PCMP:%.*]], align 4
-; BE-NEXT: [[PSTOR_AB_M1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
-; BE-NEXT: store i32 0, i32* [[PSTOR_AB_M1]], align 4
+; BE-NEXT: store i32 0, ptr [[PCMP:%.*]], align 4
+; BE-NEXT: [[PSTOR_AB_M1:%.*]] = getelementptr i32, ptr [[PCMP]], i64 1
+; BE-NEXT: store i32 0, ptr [[PSTOR_AB_M1]], align 4
; BE-NEXT: ret void
;
; LE-LABEL: @fold_memcmp_match_too_big(
-; LE-NEXT: store i32 0, i32* [[PCMP:%.*]], align 4
-; LE-NEXT: [[PSTOR_AB_M1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
-; LE-NEXT: store i32 0, i32* [[PSTOR_AB_M1]], align 4
+; LE-NEXT: store i32 0, ptr [[PCMP:%.*]], align 4
+; LE-NEXT: [[PSTOR_AB_M1:%.*]] = getelementptr i32, ptr [[PCMP]], i64 1
+; LE-NEXT: store i32 0, ptr [[PSTOR_AB_M1]], align 4
; LE-NEXT: ret void
;
- %p0 = getelementptr [4 x i16], [4 x i16]* @ia16a, i64 0, i64 0
- %p1 = bitcast i16* %p0 to i8*
- %q0 = getelementptr [5 x i16], [5 x i16]* @ia16b, i64 0, i64 0
- %q1 = bitcast i16* %q0 to i8*
- %cmp_ab_9 = call i32 @memcmp(i8* %p1, i8* %q1, i64 9)
- %pstor_ab_9 = getelementptr i32, i32* %pcmp, i64 0
- store i32 %cmp_ab_9, i32* %pstor_ab_9
+ %cmp_ab_9 = call i32 @memcmp(ptr @ia16a, ptr @ia16b, i64 9)
+ store i32 %cmp_ab_9, ptr %pcmp
- %cmp_ab_m1 = call i32 @memcmp(i8* %p1, i8* %q1, i64 -1)
- %pstor_ab_m1 = getelementptr i32, i32* %pcmp, i64 1
- store i32 %cmp_ab_m1, i32* %pstor_ab_m1
+ %cmp_ab_m1 = call i32 @memcmp(ptr @ia16a, ptr @ia16b, i64 -1)
+ %pstor_ab_m1 = getelementptr i32, ptr %pcmp, i64 1
+ store i32 %cmp_ab_m1, ptr %pstor_ab_m1
ret void
}
; Exercise folding of memcmp calls with constant arrays including both
; negative and positive characters and both constant and nonconstant sizes.
-declare i32 @memcmp(i8*, i8*, i64)
+declare i32 @memcmp(ptr, ptr, i64)
@a = constant [7 x i8] c"abcdef\7f"
@b = constant [7 x i8] c"abcdef\80"
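; memcmp compares bytes as unsigned char, so '\7f' (127) compares less than
; '\80' (128) even though 0x80 is negative as a signed char; that ordering is
; what makes @a sort before @b in the folds below.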
; Exercise memcmp(A + C, B + C, 2) folding of small arrays that differ in
; a character with the opposite sign and a constant size.
-define void @fold_memcmp_cst_cst(i32* %pcmp) {
+define void @fold_memcmp_cst_cst(ptr %pcmp) {
; CHECK-LABEL: @fold_memcmp_cst_cst(
-; CHECK-NEXT: store i32 -1, i32* [[PCMP:%.*]], align 4
-; CHECK-NEXT: [[SB5_A5:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
-; CHECK-NEXT: store i32 1, i32* [[SB5_A5]], align 4
-; CHECK-NEXT: [[SA6_B6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; CHECK-NEXT: store i32 -1, i32* [[SA6_B6]], align 4
-; CHECK-NEXT: [[SB6_A6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; CHECK-NEXT: store i32 1, i32* [[SB6_A6]], align 4
+; CHECK-NEXT: store i32 -1, ptr [[PCMP:%.*]], align 4
+; CHECK-NEXT: [[SB5_A5:%.*]] = getelementptr i32, ptr [[PCMP]], i64 1
+; CHECK-NEXT: store i32 1, ptr [[SB5_A5]], align 4
+; CHECK-NEXT: [[SA6_B6:%.*]] = getelementptr i32, ptr [[PCMP]], i64 2
+; CHECK-NEXT: store i32 -1, ptr [[SA6_B6]], align 4
+; CHECK-NEXT: [[SB6_A6:%.*]] = getelementptr i32, ptr [[PCMP]], i64 3
+; CHECK-NEXT: store i32 1, ptr [[SB6_A6]], align 4
; CHECK-NEXT: ret void
;
- %p5 = getelementptr [7 x i8], [7 x i8]* @a, i64 0, i64 5
- %p6 = getelementptr [7 x i8], [7 x i8]* @a, i64 0, i64 6
+ %p5 = getelementptr [7 x i8], ptr @a, i64 0, i64 5
+ %p6 = getelementptr [7 x i8], ptr @a, i64 0, i64 6
- %q5 = getelementptr [7 x i8], [7 x i8]* @b, i64 0, i64 5
- %q6 = getelementptr [7 x i8], [7 x i8]* @b, i64 0, i64 6
+ %q5 = getelementptr [7 x i8], ptr @b, i64 0, i64 5
+ %q6 = getelementptr [7 x i8], ptr @b, i64 0, i64 6
; Fold memcmp(a + 5, b + 5, 2) to -1.
- %ca5_b5 = call i32 @memcmp(i8* %p5, i8* %q5, i64 2)
- %sa5_b5 = getelementptr i32, i32* %pcmp, i64 0
- store i32 %ca5_b5, i32* %sa5_b5
+ %ca5_b5 = call i32 @memcmp(ptr %p5, ptr %q5, i64 2)
+ store i32 %ca5_b5, ptr %pcmp
; Fold memcmp(b + 5, a + 5, 2) to +1.
- %cb5_a5 = call i32 @memcmp(i8* %q5, i8* %p5, i64 2)
- %sb5_a5 = getelementptr i32, i32* %pcmp, i64 1
- store i32 %cb5_a5, i32* %sb5_a5
+ %cb5_a5 = call i32 @memcmp(ptr %q5, ptr %p5, i64 2)
+ %sb5_a5 = getelementptr i32, ptr %pcmp, i64 1
+ store i32 %cb5_a5, ptr %sb5_a5
; Fold memcmp(a + 6, b + 6, 1) to -1.
- %ca6_b6 = call i32 @memcmp(i8* %p6, i8* %q6, i64 1)
- %sa6_b6 = getelementptr i32, i32* %pcmp, i64 2
- store i32 %ca6_b6, i32* %sa6_b6
+ %ca6_b6 = call i32 @memcmp(ptr %p6, ptr %q6, i64 1)
+ %sa6_b6 = getelementptr i32, ptr %pcmp, i64 2
+ store i32 %ca6_b6, ptr %sa6_b6
; Fold memcmp(b + 6, a + 6, 1) to +1.
- %cb6_a6 = call i32 @memcmp(i8* %q6, i8* %p6, i64 1)
- %sb6_a6 = getelementptr i32, i32* %pcmp, i64 3
- store i32 %cb6_a6, i32* %sb6_a6
+ %cb6_a6 = call i32 @memcmp(ptr %q6, ptr %p6, i64 1)
+ %sb6_a6 = getelementptr i32, ptr %pcmp, i64 3
+ store i32 %cb6_a6, ptr %sb6_a6
ret void
}
; Exercise memcmp(A, B, N) folding of arrays that differ in a character
; with the opposite sign and a variable size.
-define void @fold_memcmp_cst_var(i32* %pcmp, i64 %n) {
+define void @fold_memcmp_cst_var(ptr %pcmp, i64 %n) {
; CHECK-LABEL: @fold_memcmp_cst_var(
; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[N:%.*]], 6
; CHECK-NEXT: [[TMP2:%.*]] = sext i1 [[TMP1]] to i32
-; CHECK-NEXT: store i32 [[TMP2]], i32* [[PCMP:%.*]], align 4
+; CHECK-NEXT: store i32 [[TMP2]], ptr [[PCMP:%.*]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i64 [[N]], 6
; CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
-; CHECK-NEXT: [[SB0_A0:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
-; CHECK-NEXT: store i32 [[TMP4]], i32* [[SB0_A0]], align 4
+; CHECK-NEXT: [[SB0_A0:%.*]] = getelementptr i32, ptr [[PCMP]], i64 1
+; CHECK-NEXT: store i32 [[TMP4]], ptr [[SB0_A0]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne i64 [[N]], 0
; CHECK-NEXT: [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
-; CHECK-NEXT: [[SA6_B6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; CHECK-NEXT: store i32 [[TMP6]], i32* [[SA6_B6]], align 4
+; CHECK-NEXT: [[SA6_B6:%.*]] = getelementptr i32, ptr [[PCMP]], i64 2
+; CHECK-NEXT: store i32 [[TMP6]], ptr [[SA6_B6]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = icmp ne i64 [[N]], 0
; CHECK-NEXT: [[TMP8:%.*]] = zext i1 [[TMP7]] to i32
-; CHECK-NEXT: [[SB6_A6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; CHECK-NEXT: store i32 [[TMP8]], i32* [[SB6_A6]], align 4
+; CHECK-NEXT: [[SB6_A6:%.*]] = getelementptr i32, ptr [[PCMP]], i64 3
+; CHECK-NEXT: store i32 [[TMP8]], ptr [[SB6_A6]], align 4
; CHECK-NEXT: ret void
;
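; Since @a and @b first differ at byte offset 6, each fold below is really to
; a select on the size: 0 when the compared prefixes are equal (n <= 6 for the
; full arrays, n == 0 for the +6 pointers) and -1 or +1 otherwise, which is
; what the icmp/sext/zext sequences in the checks above encode.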
- %p0 = getelementptr [7 x i8], [7 x i8]* @a, i64 0, i64 0
- %p6 = getelementptr [7 x i8], [7 x i8]* @a, i64 0, i64 6
+ %p6 = getelementptr [7 x i8], ptr @a, i64 0, i64 6
- %q0 = getelementptr [7 x i8], [7 x i8]* @b, i64 0, i64 0
- %q6 = getelementptr [7 x i8], [7 x i8]* @b, i64 0, i64 6
+ %q6 = getelementptr [7 x i8], ptr @b, i64 0, i64 6
; Fold memcmp(a, b, n) to -1.
- %ca0_b0 = call i32 @memcmp(i8* %p0, i8* %q0, i64 %n)
- %sa0_b0 = getelementptr i32, i32* %pcmp, i64 0
- store i32 %ca0_b0, i32* %sa0_b0
+ %ca0_b0 = call i32 @memcmp(ptr @a, ptr @b, i64 %n)
+ store i32 %ca0_b0, ptr %pcmp
; Fold memcmp(b, a, n) to +1.
- %cb0_a0 = call i32 @memcmp(i8* %q0, i8* %p0, i64 %n)
- %sb0_a0 = getelementptr i32, i32* %pcmp, i64 1
- store i32 %cb0_a0, i32* %sb0_a0
+ %cb0_a0 = call i32 @memcmp(ptr @b, ptr @a, i64 %n)
+ %sb0_a0 = getelementptr i32, ptr %pcmp, i64 1
+ store i32 %cb0_a0, ptr %sb0_a0
; Fold memcmp(a + 6, b + 6, n) to -1.
- %ca6_b6 = call i32 @memcmp(i8* %p6, i8* %q6, i64 %n)
- %sa6_b6 = getelementptr i32, i32* %pcmp, i64 2
- store i32 %ca6_b6, i32* %sa6_b6
+ %ca6_b6 = call i32 @memcmp(ptr %p6, ptr %q6, i64 %n)
+ %sa6_b6 = getelementptr i32, ptr %pcmp, i64 2
+ store i32 %ca6_b6, ptr %sa6_b6
; Fold memcmp(b + 6, a + 6, n) to +1.
- %cb6_a6 = call i32 @memcmp(i8* %q6, i8* %p6, i64 %n)
- %sb6_a6 = getelementptr i32, i32* %pcmp, i64 3
- store i32 %cb6_a6, i32* %sb6_a6
+ %cb6_a6 = call i32 @memcmp(ptr %q6, ptr %p6, i64 %n)
+ %sb6_a6 = getelementptr i32, ptr %pcmp, i64 3
+ store i32 %cb6_a6, ptr %sb6_a6
ret void
}
; Exercise folding of memcmp calls with pointers into constant arrays of
; types larger than char and with fractional offsets.
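; A fractional offset is a byte offset that lands in the middle of an array
; element, such as the +1 offsets into the i16 arrays below; folding then has
; to view the constant initializers as raw bytes.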
-declare i32 @memcmp(i8*, i8*, i64)
+declare i32 @memcmp(ptr, ptr, i64)
@i32a = constant [2 x i16] [i16 4386, i16 13124]
@i32b = constant [2 x i16] [i16 4386, i16 13124]
-define void @fold_memcmp_i32a_i32b_pIb(i32 %I, i32* %pcmp)
+define void @fold_memcmp_i32a_i32b_pIb(i32 %I, ptr %pcmp)
; CHECK-LABEL: @fold_memcmp_i32a_i32b_pIb(
-; CHECK-NEXT: store i32 0, i32* [[PCMP:%.*]], align 4
-; CHECK-NEXT: [[PST_1_1_2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
-; CHECK-NEXT: store i32 0, i32* [[PST_1_1_2]], align 4
-; CHECK-NEXT: [[PST_1_1_3:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; CHECK-NEXT: store i32 0, i32* [[PST_1_1_3]], align 4
+; CHECK-NEXT: store i32 0, ptr [[PCMP:%.*]], align 4
+; CHECK-NEXT: [[PST_1_1_2:%.*]] = getelementptr i32, ptr [[PCMP]], i64 1
+; CHECK-NEXT: store i32 0, ptr [[PST_1_1_2]], align 4
+; CHECK-NEXT: [[PST_1_1_3:%.*]] = getelementptr i32, ptr [[PCMP]], i64 2
+; CHECK-NEXT: store i32 0, ptr [[PST_1_1_3]], align 4
; CHECK-NEXT: ret void
;
{
- %pi32a = getelementptr [2 x i16], [2 x i16]* @i32a, i32 0, i32 0
- %pi32b = getelementptr [2 x i16], [2 x i16]* @i32b, i32 0, i32 0
- %pi8a = bitcast i16* %pi32a to i8*
- %pi8b = bitcast i16* %pi32b to i8*
- %pi8ap1 = getelementptr i8, i8* %pi8a, i32 1
- %pi8bp1 = getelementptr i8, i8* %pi8b, i32 1
+ %pi8ap1 = getelementptr i8, ptr @i32a, i32 1
+ %pi8bp1 = getelementptr i8, ptr @i32b, i32 1
- %pst_1_1_1 = getelementptr i32, i32* %pcmp, i32 0
- %cmp_1_1_1 = call i32 @memcmp(i8* %pi8ap1, i8* %pi8ap1, i64 1)
- store i32 %cmp_1_1_1, i32* %pst_1_1_1
+ %cmp_1_1_1 = call i32 @memcmp(ptr %pi8ap1, ptr %pi8ap1, i64 1)
+ store i32 %cmp_1_1_1, ptr %pcmp
- %pst_1_1_2 = getelementptr i32, i32* %pcmp, i32 1
- %cmp_1_1_2 = call i32 @memcmp(i8* %pi8ap1, i8* %pi8ap1, i64 2)
- store i32 %cmp_1_1_2, i32* %pst_1_1_2
+ %pst_1_1_2 = getelementptr i32, ptr %pcmp, i32 1
+ %cmp_1_1_2 = call i32 @memcmp(ptr %pi8ap1, ptr %pi8ap1, i64 2)
+ store i32 %cmp_1_1_2, ptr %pst_1_1_2
- %pst_1_1_3 = getelementptr i32, i32* %pcmp, i32 2
- %cmp_1_1_3 = call i32 @memcmp(i8* %pi8ap1, i8* %pi8ap1, i64 3)
- store i32 %cmp_1_1_3, i32* %pst_1_1_3
+ %pst_1_1_3 = getelementptr i32, ptr %pcmp, i32 2
+ %cmp_1_1_3 = call i32 @memcmp(ptr %pi8ap1, ptr %pi8ap1, i64 3)
+ store i32 %cmp_1_1_3, ptr %pst_1_1_3
ret void
}
%struct.A = type { [4 x i8] }
%struct.B = type { [2 x i8], [2 x i8] }
@a = constant [1 x %struct.A] [%struct.A { [4 x i8] [i8 1, i8 2, i8 3, i8 4] }]
@b = constant [1 x %struct.B] [%struct.B { [2 x i8] [i8 1, i8 2], [2 x i8] [i8 3, i8 4]}]
-define void @fold_memcmp_A_B_pIb(i32 %I, i32* %pcmp) {
+define void @fold_memcmp_A_B_pIb(i32 %I, ptr %pcmp) {
; CHECK-LABEL: @fold_memcmp_A_B_pIb(
-; CHECK-NEXT: store i32 0, i32* [[PCMP:%.*]], align 4
-; CHECK-NEXT: [[PST_0_0_2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
-; CHECK-NEXT: store i32 0, i32* [[PST_0_0_2]], align 4
-; CHECK-NEXT: [[PST_0_0_3:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; CHECK-NEXT: store i32 0, i32* [[PST_0_0_3]], align 4
-; CHECK-NEXT: [[PST_0_0_4:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; CHECK-NEXT: store i32 0, i32* [[PST_0_0_4]], align 4
-; CHECK-NEXT: [[PST_0_1_1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 4
-; CHECK-NEXT: store i32 -1, i32* [[PST_0_1_1]], align 4
-; CHECK-NEXT: [[PST_0_1_2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 5
-; CHECK-NEXT: store i32 -1, i32* [[PST_0_1_2]], align 4
-; CHECK-NEXT: [[PST_0_1_3:%.*]] = getelementptr i32, i32* [[PCMP]], i64 6
-; CHECK-NEXT: store i32 -1, i32* [[PST_0_1_3]], align 4
-; CHECK-NEXT: [[PST_1_0_1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 4
-; CHECK-NEXT: store i32 1, i32* [[PST_1_0_1]], align 4
-; CHECK-NEXT: [[PST_1_0_2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 5
-; CHECK-NEXT: store i32 1, i32* [[PST_1_0_2]], align 4
-; CHECK-NEXT: [[PST_1_0_3:%.*]] = getelementptr i32, i32* [[PCMP]], i64 6
-; CHECK-NEXT: store i32 1, i32* [[PST_1_0_3]], align 4
+; CHECK-NEXT: store i32 0, ptr [[PCMP:%.*]], align 4
+; CHECK-NEXT: [[PST_0_0_2:%.*]] = getelementptr i32, ptr [[PCMP]], i64 1
+; CHECK-NEXT: store i32 0, ptr [[PST_0_0_2]], align 4
+; CHECK-NEXT: [[PST_0_0_3:%.*]] = getelementptr i32, ptr [[PCMP]], i64 2
+; CHECK-NEXT: store i32 0, ptr [[PST_0_0_3]], align 4
+; CHECK-NEXT: [[PST_0_0_4:%.*]] = getelementptr i32, ptr [[PCMP]], i64 3
+; CHECK-NEXT: store i32 0, ptr [[PST_0_0_4]], align 4
+; CHECK-NEXT: [[PST_0_1_1:%.*]] = getelementptr i32, ptr [[PCMP]], i64 4
+; CHECK-NEXT: store i32 -1, ptr [[PST_0_1_1]], align 4
+; CHECK-NEXT: [[PST_0_1_2:%.*]] = getelementptr i32, ptr [[PCMP]], i64 5
+; CHECK-NEXT: store i32 -1, ptr [[PST_0_1_2]], align 4
+; CHECK-NEXT: [[PST_0_1_3:%.*]] = getelementptr i32, ptr [[PCMP]], i64 6
+; CHECK-NEXT: store i32 -1, ptr [[PST_0_1_3]], align 4
+; CHECK-NEXT: [[PST_1_0_1:%.*]] = getelementptr i32, ptr [[PCMP]], i64 4
+; CHECK-NEXT: store i32 1, ptr [[PST_1_0_1]], align 4
+; CHECK-NEXT: [[PST_1_0_2:%.*]] = getelementptr i32, ptr [[PCMP]], i64 5
+; CHECK-NEXT: store i32 1, ptr [[PST_1_0_2]], align 4
+; CHECK-NEXT: [[PST_1_0_3:%.*]] = getelementptr i32, ptr [[PCMP]], i64 6
+; CHECK-NEXT: store i32 1, ptr [[PST_1_0_3]], align 4
; CHECK-NEXT: ret void
;
- %pa = getelementptr [1 x %struct.A], [1 x %struct.A]* @a, i64 0, i64 0
- %pb = getelementptr [1 x %struct.B], [1 x %struct.B]* @b, i64 0, i64 0
- %pi8a = bitcast %struct.A* %pa to i8*
- %pi8b = bitcast %struct.B* %pb to i8*
- %pi8ap0 = getelementptr i8, i8* %pi8a, i32 0
- %pi8bp0 = getelementptr i8, i8* %pi8b, i32 0
; Fold memcmp(&a, &b, 1) to 0;
- %pst_0_0_1 = getelementptr i32, i32* %pcmp, i32 0
- %cmp_0_0_1 = call i32 @memcmp(i8* %pi8ap0, i8* %pi8bp0, i64 1)
- store i32 %cmp_0_0_1, i32* %pst_0_0_1
+ %cmp_0_0_1 = call i32 @memcmp(ptr @a, ptr @b, i64 1)
+ store i32 %cmp_0_0_1, ptr %pcmp
; Fold memcmp(&a, &b, 2) to 0;
- %pst_0_0_2 = getelementptr i32, i32* %pcmp, i32 1
- %cmp_0_0_2 = call i32 @memcmp(i8* %pi8ap0, i8* %pi8bp0, i64 2)
- store i32 %cmp_0_0_2, i32* %pst_0_0_2
+ %pst_0_0_2 = getelementptr i32, ptr %pcmp, i32 1
+ %cmp_0_0_2 = call i32 @memcmp(ptr @a, ptr @b, i64 2)
+ store i32 %cmp_0_0_2, ptr %pst_0_0_2
; Fold memcmp(&a, &b, 3) to 0;
- %pst_0_0_3 = getelementptr i32, i32* %pcmp, i32 2
- %cmp_0_0_3 = call i32 @memcmp(i8* %pi8ap0, i8* %pi8bp0, i64 3)
- store i32 %cmp_0_0_3, i32* %pst_0_0_3
+ %pst_0_0_3 = getelementptr i32, ptr %pcmp, i32 2
+ %cmp_0_0_3 = call i32 @memcmp(ptr @a, ptr @b, i64 3)
+ store i32 %cmp_0_0_3, ptr %pst_0_0_3
; Fold memcmp(&a, &b, 4) to 0;
- %pst_0_0_4 = getelementptr i32, i32* %pcmp, i32 3
- %cmp_0_0_4 = call i32 @memcmp(i8* %pi8ap0, i8* %pi8bp0, i64 4)
- store i32 %cmp_0_0_4, i32* %pst_0_0_4
+ %pst_0_0_4 = getelementptr i32, ptr %pcmp, i32 3
+ %cmp_0_0_4 = call i32 @memcmp(ptr @a, ptr @b, i64 4)
+ store i32 %cmp_0_0_4, ptr %pst_0_0_4
- %pi8bp1 = getelementptr i8, i8* %pi8b, i32 1
+ %pi8bp1 = getelementptr i8, ptr @b, i32 1
; Fold memcmp(&a, (char*)&b + 1, 1) to -1;
- %pst_0_1_1 = getelementptr i32, i32* %pcmp, i32 4
- %cmp_0_1_1 = call i32 @memcmp(i8* %pi8ap0, i8* %pi8bp1, i64 1)
- store i32 %cmp_0_1_1, i32* %pst_0_1_1
+ %pst_0_1_1 = getelementptr i32, ptr %pcmp, i32 4
+ %cmp_0_1_1 = call i32 @memcmp(ptr @a, ptr %pi8bp1, i64 1)
+ store i32 %cmp_0_1_1, ptr %pst_0_1_1
; Fold memcmp(&a, (char*)&b + 1, 2) to -1;
- %pst_0_1_2 = getelementptr i32, i32* %pcmp, i32 5
- %cmp_0_1_2 = call i32 @memcmp(i8* %pi8ap0, i8* %pi8bp1, i64 2)
- store i32 %cmp_0_1_2, i32* %pst_0_1_2
+ %pst_0_1_2 = getelementptr i32, ptr %pcmp, i32 5
+ %cmp_0_1_2 = call i32 @memcmp(ptr @a, ptr %pi8bp1, i64 2)
+ store i32 %cmp_0_1_2, ptr %pst_0_1_2
; Fold memcmp(&a, (char*)&b + 1, 3) to -1;
- %pst_0_1_3 = getelementptr i32, i32* %pcmp, i32 6
- %cmp_0_1_3 = call i32 @memcmp(i8* %pi8ap0, i8* %pi8bp1, i64 3)
- store i32 %cmp_0_1_3, i32* %pst_0_1_3
+ %pst_0_1_3 = getelementptr i32, ptr %pcmp, i32 6
+ %cmp_0_1_3 = call i32 @memcmp(ptr @a, ptr %pi8bp1, i64 3)
+ store i32 %cmp_0_1_3, ptr %pst_0_1_3
- %pi8ap1 = getelementptr i8, i8* %pi8a, i32 1
+ %pi8ap1 = getelementptr i8, ptr @a, i32 1
; Fold memcmp((char*)&a + 1, &b, 1) to +1;
- %pst_1_0_1 = getelementptr i32, i32* %pcmp, i32 4
- %cmp_1_0_1 = call i32 @memcmp(i8* %pi8ap1, i8* %pi8bp0, i64 1)
- store i32 %cmp_1_0_1, i32* %pst_1_0_1
+ %pst_1_0_1 = getelementptr i32, ptr %pcmp, i32 4
+ %cmp_1_0_1 = call i32 @memcmp(ptr %pi8ap1, ptr @b, i64 1)
+ store i32 %cmp_1_0_1, ptr %pst_1_0_1
; Fold memcmp((char*)&a + 1, &b, 2) to +1;
- %pst_1_0_2 = getelementptr i32, i32* %pcmp, i32 5
- %cmp_1_0_2 = call i32 @memcmp(i8* %pi8ap1, i8* %pi8bp0, i64 2)
- store i32 %cmp_1_0_2, i32* %pst_1_0_2
+ %pst_1_0_2 = getelementptr i32, ptr %pcmp, i32 5
+ %cmp_1_0_2 = call i32 @memcmp(ptr %pi8ap1, ptr @b, i64 2)
+ store i32 %cmp_1_0_2, ptr %pst_1_0_2
; Fold memcmp((char*)&a + 1, &b, 3) to +1;
- %pst_1_0_3 = getelementptr i32, i32* %pcmp, i32 6
- %cmp_1_0_3 = call i32 @memcmp(i8* %pi8ap1, i8* %pi8bp0, i64 3)
- store i32 %cmp_1_0_3, i32* %pst_1_0_3
+ %pst_1_0_3 = getelementptr i32, ptr %pcmp, i32 6
+ %cmp_1_0_3 = call i32 @memcmp(ptr %pi8ap1, ptr @b, i64 3)
+ store i32 %cmp_1_0_3, ptr %pst_1_0_3
ret void
}
; Verify that memcmp calls with past-the-end pointers don't cause trouble
; and are optimally folded.
-declare i32 @memcmp(i8*, i8*, i64)
+declare i32 @memcmp(ptr, ptr, i64)
@a5 = constant [5 x i8] c"12345";
define i32 @fold_memcmp_a5_a5p5_n(i64 %n) {
; CHECK-LABEL: @fold_memcmp_a5_a5p5_n(
; CHECK-NEXT: ret i32 0
;
- %pa5_p0 = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 0
- %pa5_p5 = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 5
- %cmp = call i32 @memcmp(i8* %pa5_p0, i8* %pa5_p5, i64 %n)
+ %pa5_p5 = getelementptr [5 x i8], ptr @a5, i32 0, i32 5
+ %cmp = call i32 @memcmp(ptr @a5, ptr %pa5_p5, i64 %n)
ret i32 %cmp
}
define i32 @fold_memcmp_a5p5_a5p5_n(i64 %n) {
; CHECK-LABEL: @fold_memcmp_a5p5_a5p5_n(
; CHECK-NEXT: ret i32 0
;
- %pa5_p5 = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 5
- %qa5_p5 = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 5
- %cmp = call i32 @memcmp(i8* %pa5_p5, i8* %qa5_p5, i64 %n)
+ %pa5_p5 = getelementptr [5 x i8], ptr @a5, i32 0, i32 5
+ %qa5_p5 = getelementptr [5 x i8], ptr @a5, i32 0, i32 5
+ %cmp = call i32 @memcmp(ptr %pa5_p5, ptr %qa5_p5, i64 %n)
ret i32 %cmp
}
define i32 @fold_memcmp_a5pi_a5p5_n(i32 %i, i64 %n) {
; CHECK-LABEL: @fold_memcmp_a5pi_a5p5_n(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[I:%.*]] to i64
-; CHECK-NEXT: [[PA5_PI:%.*]] = getelementptr [5 x i8], [5 x i8]* @a5, i64 0, i64 [[TMP1]]
-; CHECK-NEXT: [[CMP:%.*]] = call i32 @memcmp(i8* [[PA5_PI]], i8* getelementptr inbounds ([5 x i8], [5 x i8]* @a5, i64 1, i64 0), i64 [[N:%.*]])
+; CHECK-NEXT: [[PA5_PI:%.*]] = getelementptr [5 x i8], ptr @a5, i64 0, i64 [[TMP1]]
+; CHECK-NEXT: [[CMP:%.*]] = call i32 @memcmp(ptr [[PA5_PI]], ptr getelementptr inbounds ([5 x i8], ptr @a5, i64 1, i64 0), i64 [[N:%.*]])
; CHECK-NEXT: ret i32 [[CMP]]
;
- %pa5_pi = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 %i
- %pa5_p5 = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 5
- %cmp = call i32 @memcmp(i8* %pa5_pi, i8* %pa5_p5, i64 %n)
+ %pa5_pi = getelementptr [5 x i8], ptr @a5, i32 0, i32 %i
+ %pa5_p5 = getelementptr [5 x i8], ptr @a5, i32 0, i32 5
+ %cmp = call i32 @memcmp(ptr %pa5_pi, ptr %pa5_p5, i64 %n)
ret i32 %cmp
}
; RUN: opt < %s -passes=instcombine -S -data-layout=e-n32 | FileCheck %s --check-prefix=ALL --check-prefix=LE
; RUN: opt < %s -passes=instcombine -S -data-layout=E-n32 | FileCheck %s --check-prefix=ALL --check-prefix=BE
-declare i32 @memcmp(i8*, i8*, i64)
+declare i32 @memcmp(ptr, ptr, i64)
; The alignment of this constant does not matter. We constant fold the load.
@charbuf = private unnamed_addr constant [4 x i8] [i8 0, i8 0, i8 0, i8 1], align 1
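; The bytes 0, 0, 0, 1 read back as a single i32 are 0x01000000 (16777216)
; on a little-endian target and 0x00000001 (1) on a big-endian one, which is
; what the LE and BE check lines below expect.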
-define i1 @memcmp_4bytes_unaligned_constant_i8(i8* align 4 %x) {
+define i1 @memcmp_4bytes_unaligned_constant_i8(ptr align 4 %x) {
; LE-LABEL: @memcmp_4bytes_unaligned_constant_i8(
-; LE-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
-; LE-NEXT: [[LHSV:%.*]] = load i32, i32* [[TMP1]], align 4
+; LE-NEXT: [[LHSV:%.*]] = load i32, ptr [[X:%.*]], align 4
; LE-NEXT: [[DOTNOT:%.*]] = icmp eq i32 [[LHSV]], 16777216
; LE-NEXT: ret i1 [[DOTNOT]]
;
; BE-LABEL: @memcmp_4bytes_unaligned_constant_i8(
-; BE-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
-; BE-NEXT: [[LHSV:%.*]] = load i32, i32* [[TMP1]], align 4
+; BE-NEXT: [[LHSV:%.*]] = load i32, ptr [[X:%.*]], align 4
; BE-NEXT: [[DOTNOT:%.*]] = icmp eq i32 [[LHSV]], 1
; BE-NEXT: ret i1 [[DOTNOT]]
;
- %call = tail call i32 @memcmp(i8* %x, i8* getelementptr inbounds ([4 x i8], [4 x i8]* @charbuf, i64 0, i64 0), i64 4)
+ %call = tail call i32 @memcmp(ptr %x, ptr @charbuf, i64 4)
%cmpeq0 = icmp eq i32 %call, 0
ret i1 %cmpeq0
}
@intbuf_unaligned = private unnamed_addr constant [4 x i16] [i16 1, i16 2, i16 3, i16 4], align 1
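; The first four bytes of this array are 01 00 02 00 on a little-endian
; target (i32 0x00020001 = 131073) and 00 01 00 02 on a big-endian one
; (i32 0x00010002 = 65538), matching the constants in the checks below.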
-define i1 @memcmp_4bytes_unaligned_constant_i16(i8* align 4 %x) {
+define i1 @memcmp_4bytes_unaligned_constant_i16(ptr align 4 %x) {
; LE-LABEL: @memcmp_4bytes_unaligned_constant_i16(
-; LE-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
-; LE-NEXT: [[RHSV:%.*]] = load i32, i32* [[TMP1]], align 4
+; LE-NEXT: [[RHSV:%.*]] = load i32, ptr [[X:%.*]], align 4
; LE-NEXT: [[DOTNOT:%.*]] = icmp eq i32 [[RHSV]], 131073
; LE-NEXT: ret i1 [[DOTNOT]]
;
; BE-LABEL: @memcmp_4bytes_unaligned_constant_i16(
-; BE-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
-; BE-NEXT: [[RHSV:%.*]] = load i32, i32* [[TMP1]], align 4
+; BE-NEXT: [[RHSV:%.*]] = load i32, ptr [[X:%.*]], align 4
; BE-NEXT: [[DOTNOT:%.*]] = icmp eq i32 [[RHSV]], 65538
; BE-NEXT: ret i1 [[DOTNOT]]
;
- %call = tail call i32 @memcmp(i8* bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @intbuf_unaligned, i64 0, i64 0) to i8*), i8* %x, i64 4)
+ %call = tail call i32 @memcmp(ptr @intbuf_unaligned, ptr %x, i64 4)
%cmpeq0 = icmp eq i32 %call, 0
ret i1 %cmpeq0
}
@intbuf = private unnamed_addr constant [2 x i32] [i32 0, i32 1], align 4
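; The 3-byte compare below pits the leading bytes of i32 1 against those of
; i32 0: on a big-endian target both are 00 00 00, so the fold yields true;
; on a little-endian target 01 00 00 differs from 00 00 00, so it yields
; false, as the LE/BE checks show.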
-define i1 @memcmp_3bytes_aligned_constant_i32(i8* align 4 %x) {
+define i1 @memcmp_3bytes_aligned_constant_i32(ptr align 4 %x) {
; LE-LABEL: @memcmp_3bytes_aligned_constant_i32(
; LE-NEXT: ret i1 false
;
; BE-LABEL: @memcmp_3bytes_aligned_constant_i32(
; BE-NEXT: ret i1 true
;
- %call = tail call i32 @memcmp(i8* bitcast (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @intbuf, i64 0, i64 1) to i8*), i8* bitcast (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @intbuf, i64 0, i64 0) to i8*), i64 3)
+ %call = tail call i32 @memcmp(ptr getelementptr inbounds ([2 x i32], ptr @intbuf, i64 0, i64 1), ptr @intbuf, i64 3)
%cmpeq0 = icmp eq i32 %call, 0
ret i1 %cmpeq0
}
; A sloppy implementation would loop forever by recreating the unused instructions.
-define i1 @memcmp_4bytes_one_unaligned_i8(i8* align 4 %x, i8* align 1 %y) {
+define i1 @memcmp_4bytes_one_unaligned_i8(ptr align 4 %x, ptr align 1 %y) {
; ALL-LABEL: @memcmp_4bytes_one_unaligned_i8(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* noundef nonnull dereferenceable(4) [[X:%.*]], i8* noundef nonnull dereferenceable(4) [[Y:%.*]], i64 4)
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(ptr noundef nonnull dereferenceable(4) [[X:%.*]], ptr noundef nonnull dereferenceable(4) [[Y:%.*]], i64 4)
; ALL-NEXT: [[CMPEQ0:%.*]] = icmp eq i32 [[CALL]], 0
; ALL-NEXT: ret i1 [[CMPEQ0]]
;
- %bc = bitcast i8* %x to i32*
- %lhsv = load i32, i32* %bc
- %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 4)
+ %lhsv = load i32, ptr %x
+ %call = tail call i32 @memcmp(ptr %x, ptr %y, i64 4)
%cmpeq0 = icmp eq i32 %call, 0
ret i1 %cmpeq0
}
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-declare i8* @memcpy(i8*, i8*, i32)
+declare ptr @memcpy(ptr, ptr, i32)
; Check memcpy(mem1, mem2, size) -> llvm.memcpy(mem1, mem2, size, 1).
-define i8* @test_simplify1(i8* %mem1, i8* %mem2, i32 %size) {
+define ptr @test_simplify1(ptr %mem1, ptr %mem2, i32 %size) {
; CHECK-LABEL: @test_simplify1(
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[MEM1:%.*]], i8* align 1 [[MEM2:%.*]], i32 [[SIZE:%.*]], i1 false)
-; CHECK-NEXT: ret i8* [[MEM1]]
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[MEM1:%.*]], ptr align 1 [[MEM2:%.*]], i32 [[SIZE:%.*]], i1 false)
+; CHECK-NEXT: ret ptr [[MEM1]]
;
- %ret = call i8* @memcpy(i8* %mem1, i8* %mem2, i32 %size)
- ret i8* %ret
+ %ret = call ptr @memcpy(ptr %mem1, ptr %mem2, i32 %size)
+ ret ptr %ret
}
; Verify that the strictfp attr doesn't block this optimization.
-define i8* @test_simplify2(i8* %mem1, i8* %mem2, i32 %size) strictfp {
+define ptr @test_simplify2(ptr %mem1, ptr %mem2, i32 %size) strictfp {
; CHECK-LABEL: @test_simplify2(
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[MEM1:%.*]], i8* align 1 [[MEM2:%.*]], i32 [[SIZE:%.*]], i1 false) #[[ATTR0:[0-9]+]]
-; CHECK-NEXT: ret i8* [[MEM1]]
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[MEM1:%.*]], ptr align 1 [[MEM2:%.*]], i32 [[SIZE:%.*]], i1 false) #[[ATTR0:[0-9]+]]
+; CHECK-NEXT: ret ptr [[MEM1]]
;
- %ret = call i8* @memcpy(i8* %mem1, i8* %mem2, i32 %size) strictfp
- ret i8* %ret
+ %ret = call ptr @memcpy(ptr %mem1, ptr %mem2, i32 %size) strictfp
+ ret ptr %ret
}
; Verify that the first argument to memcpy may come from a call that is not
; a tail call, while the call to @memcpy itself may be a tail call.
-declare i8* @get_dest()
+declare ptr @get_dest()
-define i8* @test_simplify3(i8* %mem2, i32 %size) {
+define ptr @test_simplify3(ptr %mem2, i32 %size) {
; CHECK-LABEL: @test_simplify3(
-; CHECK-NEXT: [[DEST:%.*]] = call i8* @get_dest()
-; CHECK-NEXT: tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[MEM2:%.*]], i32 [[SIZE:%.*]], i1 false)
-; CHECK-NEXT: ret i8* [[DEST]]
+; CHECK-NEXT: [[DEST:%.*]] = call ptr @get_dest()
+; CHECK-NEXT: tail call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[DEST]], ptr align 1 [[MEM2:%.*]], i32 [[SIZE:%.*]], i1 false)
+; CHECK-NEXT: ret ptr [[DEST]]
;
- %dest = call i8* @get_dest()
- %ret = tail call i8* @memcpy(i8* %dest, i8* %mem2, i32 %size)
- ret i8* %ret
+ %dest = call ptr @get_dest()
+ %ret = tail call ptr @memcpy(ptr %dest, ptr %mem2, i32 %size)
+ ret ptr %ret
}
-define i8* @test_no_incompatible_attr(i8* %mem1, i8* %mem2, i32 %size) {
+define ptr @test_no_incompatible_attr(ptr %mem1, ptr %mem2, i32 %size) {
; CHECK-LABEL: @test_no_incompatible_attr(
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[MEM1:%.*]], i8* align 1 [[MEM2:%.*]], i32 [[SIZE:%.*]], i1 false)
-; CHECK-NEXT: ret i8* [[MEM1]]
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[MEM1:%.*]], ptr align 1 [[MEM2:%.*]], i32 [[SIZE:%.*]], i1 false)
+; CHECK-NEXT: ret ptr [[MEM1]]
;
- %ret = call dereferenceable(1) i8* @memcpy(i8* %mem1, i8* %mem2, i32 %size)
- ret i8* %ret
+ %ret = call dereferenceable(1) ptr @memcpy(ptr %mem1, ptr %mem2, i32 %size)
+ ret ptr %ret
}
-define i8* @test_no_simplify1(i8* %mem1, i8* %mem2, i32 %size) {
+define ptr @test_no_simplify1(ptr %mem1, ptr %mem2, i32 %size) {
; CHECK-LABEL: @test_no_simplify1(
-; CHECK-NEXT: [[RET:%.*]] = musttail call i8* @memcpy(i8* [[MEM1:%.*]], i8* [[MEM2:%.*]], i32 [[SIZE:%.*]])
-; CHECK-NEXT: ret i8* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = musttail call ptr @memcpy(ptr [[MEM1:%.*]], ptr [[MEM2:%.*]], i32 [[SIZE:%.*]])
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ret = musttail call i8* @memcpy(i8* %mem1, i8* %mem2, i32 %size)
- ret i8* %ret
+ %ret = musttail call ptr @memcpy(ptr %mem1, ptr %mem2, i32 %size)
+ ret ptr %ret
}
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-declare i8 @memcpy(i8*, i8*, i32)
+declare i8 @memcpy(ptr, ptr, i32)
; Check that memcpy functions with the wrong prototype (one that doesn't return a pointer) aren't simplified.
-define i8 @test_no_simplify1(i8* %mem1, i8* %mem2, i32 %size) {
+define i8 @test_no_simplify1(ptr %mem1, ptr %mem2, i32 %size) {
; CHECK-LABEL: @test_no_simplify1(
-; CHECK-NEXT: [[RET:%.*]] = call i8 @memcpy(i8* %mem1, i8* %mem2, i32 %size)
+; CHECK-NEXT: [[RET:%.*]] = call i8 @memcpy(ptr %mem1, ptr %mem2, i32 %size)
; CHECK-NEXT: ret i8 [[RET]]
;
- %ret = call i8 @memcpy(i8* %mem1, i8* %mem2, i32 %size)
+ %ret = call i8 @memcpy(ptr %mem1, ptr %mem2, i32 %size)
ret i8 %ret
}
; RUN: opt < %s -passes=instcombine -S -data-layout=n32:64 | FileCheck %s
; RUN: opt < %s -passes=instcombine -S -data-layout=n32:64:128 | FileCheck %s
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
; memcpy can be expanded inline with load/store (based on the datalayout?)
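; In the checks that follow, power-of-2 sizes of 1, 2, 4, and 8 bytes are
; expanded to a single integer load/store pair, while sizes of 3, 5, and 16
; bytes are left as llvm.memcpy calls under both datalayouts above.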
-define void @copy_1_byte(i8* %d, i8* %s) {
+define void @copy_1_byte(ptr %d, ptr %s) {
; CHECK-LABEL: @copy_1_byte(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* [[S:%.*]], align 1
-; CHECK-NEXT: store i8 [[TMP1]], i8* [[D:%.*]], align 1
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[S:%.*]], align 1
+; CHECK-NEXT: store i8 [[TMP1]], ptr [[D:%.*]], align 1
; CHECK-NEXT: ret void
;
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d, i8* %s, i32 1, i1 false)
+ call void @llvm.memcpy.p0.p0.i32(ptr %d, ptr %s, i32 1, i1 false)
ret void
}
-define void @copy_2_bytes(i8* %d, i8* %s) {
+define void @copy_2_bytes(ptr %d, ptr %s) {
; CHECK-LABEL: @copy_2_bytes(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[S:%.*]] to i16*
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[D:%.*]] to i16*
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]], align 1
-; CHECK-NEXT: store i16 [[TMP3]], i16* [[TMP2]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr [[S:%.*]], align 1
+; CHECK-NEXT: store i16 [[TMP3]], ptr [[D:%.*]], align 1
; CHECK-NEXT: ret void
;
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d, i8* %s, i32 2, i1 false)
+ call void @llvm.memcpy.p0.p0.i32(ptr %d, ptr %s, i32 2, i1 false)
ret void
}
; We don't expand small non-power-of-2 sizes. Should we? It might be a target-dependent choice.
-define void @copy_3_bytes(i8* %d, i8* %s) {
+define void @copy_3_bytes(ptr %d, ptr %s) {
; CHECK-LABEL: @copy_3_bytes(
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* noundef nonnull align 1 dereferenceable(3) [[D:%.*]], i8* noundef nonnull align 1 dereferenceable(3) [[S:%.*]], i32 3, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr noundef nonnull align 1 dereferenceable(3) [[D:%.*]], ptr noundef nonnull align 1 dereferenceable(3) [[S:%.*]], i32 3, i1 false)
; CHECK-NEXT: ret void
;
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d, i8* %s, i32 3, i1 false)
+ call void @llvm.memcpy.p0.p0.i32(ptr %d, ptr %s, i32 3, i1 false)
ret void
}
-define void @copy_4_bytes(i8* %d, i8* %s) {
+define void @copy_4_bytes(ptr %d, ptr %s) {
; CHECK-LABEL: @copy_4_bytes(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[S:%.*]] to i32*
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[D:%.*]] to i32*
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 1
-; CHECK-NEXT: store i32 [[TMP3]], i32* [[TMP2]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[S:%.*]], align 1
+; CHECK-NEXT: store i32 [[TMP3]], ptr [[D:%.*]], align 1
; CHECK-NEXT: ret void
;
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d, i8* %s, i32 4, i1 false)
+ call void @llvm.memcpy.p0.p0.i32(ptr %d, ptr %s, i32 4, i1 false)
ret void
}
; We don't expand small non-power-of-2 sizes. Should we? It might be a target-dependent choice.
-define void @copy_5_bytes(i8* %d, i8* %s) {
+define void @copy_5_bytes(ptr %d, ptr %s) {
; CHECK-LABEL: @copy_5_bytes(
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* noundef nonnull align 1 dereferenceable(5) [[D:%.*]], i8* noundef nonnull align 1 dereferenceable(5) [[S:%.*]], i32 5, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr noundef nonnull align 1 dereferenceable(5) [[D:%.*]], ptr noundef nonnull align 1 dereferenceable(5) [[S:%.*]], i32 5, i1 false)
; CHECK-NEXT: ret void
;
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d, i8* %s, i32 5, i1 false)
+ call void @llvm.memcpy.p0.p0.i32(ptr %d, ptr %s, i32 5, i1 false)
ret void
}
-define void @copy_8_bytes(i8* %d, i8* %s) {
+define void @copy_8_bytes(ptr %d, ptr %s) {
; CHECK-LABEL: @copy_8_bytes(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[S:%.*]] to i64*
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[D:%.*]] to i64*
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
-; CHECK-NEXT: store i64 [[TMP3]], i64* [[TMP2]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[S:%.*]], align 1
+; CHECK-NEXT: store i64 [[TMP3]], ptr [[D:%.*]], align 1
; CHECK-NEXT: ret void
;
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d, i8* %s, i32 8, i1 false)
+ call void @llvm.memcpy.p0.p0.i32(ptr %d, ptr %s, i32 8, i1 false)
ret void
}
-define void @copy_16_bytes(i8* %d, i8* %s) {
+define void @copy_16_bytes(ptr %d, ptr %s) {
; CHECK-LABEL: @copy_16_bytes(
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* noundef nonnull align 1 dereferenceable(16) [[D:%.*]], i8* noundef nonnull align 1 dereferenceable(16) [[S:%.*]], i32 16, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr noundef nonnull align 1 dereferenceable(16) [[D:%.*]], ptr noundef nonnull align 1 dereferenceable(16) [[S:%.*]], i32 16, i1 false)
; CHECK-NEXT: ret void
;
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d, i8* %s, i32 16, i1 false)
+ call void @llvm.memcpy.p0.p0.i32(ptr %d, ptr %s, i32 16, i1 false)
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
; Same src/dest.
-define void @test1(i8* %a) {
+define void @test1(ptr %a) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: ret void
;
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a, i8* %a, i32 100, i1 false)
+ tail call void @llvm.memcpy.p0.p0.i32(ptr %a, ptr %a, i32 100, i1 false)
ret void
}
; PR8267 - same src/dest, but volatile.
-define void @test2(i8* %a) {
+define void @test2(ptr %a) {
; CHECK-LABEL: @test2(
-; CHECK-NEXT: tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[A:%.*]], i8* [[A]], i32 100, i1 true)
+; CHECK-NEXT: tail call void @llvm.memcpy.p0.p0.i32(ptr [[A:%.*]], ptr [[A]], i32 100, i1 true)
; CHECK-NEXT: ret void
;
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a, i8* %a, i32 100, i1 true)
+ tail call void @llvm.memcpy.p0.p0.i32(ptr %a, ptr %a, i32 100, i1 true)
ret void
}
; 17179869184 == 0x400000000 - make sure that doesn't get truncated to 32 bits.
-define void @test3(i8* %d, i8* %s) {
+define void @test3(ptr %d, ptr %s) {
; CHECK-LABEL: @test3(
-; CHECK-NEXT: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(17179869184) [[D:%.*]], i8* noundef nonnull align 4 dereferenceable(17179869184) [[S:%.*]], i64 17179869184, i1 false)
+; CHECK-NEXT: tail call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(17179869184) [[D:%.*]], ptr noundef nonnull align 4 dereferenceable(17179869184) [[S:%.*]], i64 17179869184, i1 false)
; CHECK-NEXT: ret void
;
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %d, i8* align 4 %s, i64 17179869184, i1 false)
+ tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 %d, ptr align 4 %s, i64 17179869184, i1 false)
ret void
}
@UnknownConstant = external constant i128
-define void @memcpy_to_constant(i8* %src) {
+define void @memcpy_to_constant(ptr %src) {
; CHECK-LABEL: @memcpy_to_constant(
; CHECK-NEXT: ret void
;
- %dest = bitcast i128* @UnknownConstant to i8*
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 16, i1 false)
+ call void @llvm.memcpy.p0.p0.i32(ptr @UnknownConstant, ptr %src, i32 16, i1 false)
ret void
}
define void @test_no_simplify() {
; CHECK-LABEL: @test_no_simplify(
- %dst = bitcast %struct.T1* @t1 to i8*
- %src = bitcast %struct.T2* @t2 to i8*
-; CHECK-NEXT: call i8* @__memcpy_chk
- call i8* @__memcpy_chk(i8* %dst, i8* %src, i64 1824)
+; CHECK-NEXT: call ptr @__memcpy_chk
+ call ptr @__memcpy_chk(ptr @t1, ptr @t2, i64 1824)
ret void
}
-declare i8* @__memcpy_chk(i8*, i8*, i64)
+declare ptr @__memcpy_chk(ptr, ptr, i64)
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-declare i8* @memmove(i8*, i8*, i32)
+declare ptr @memmove(ptr, ptr, i32)
; Check memmove(mem1, mem2, size) -> llvm.memmove(mem1, mem2, size, 1).
-define i8* @test_simplify1(i8* %mem1, i8* %mem2, i32 %size) {
+define ptr @test_simplify1(ptr %mem1, ptr %mem2, i32 %size) {
; CHECK-LABEL: @test_simplify1(
- %ret = call i8* @memmove(i8* %mem1, i8* %mem2, i32 %size)
+ %ret = call ptr @memmove(ptr %mem1, ptr %mem2, i32 %size)
; CHECK: call void @llvm.memmove
- ret i8* %ret
-; CHECK: ret i8* %mem1
+ ret ptr %ret
+; CHECK: ret ptr %mem1
}
-define i8* @test_simplify2(i8* %mem1, i8* %mem2, i32 %size) {
+define ptr @test_simplify2(ptr %mem1, ptr %mem2, i32 %size) {
; CHECK-LABEL: @test_simplify2(
; CHECK-NEXT: tail call void @llvm.memmove
-; CHECK-NEXT: ret i8* %mem1
- %ret = tail call i8* @memmove(i8* %mem1, i8* %mem2, i32 %size)
- ret i8* %ret
+; CHECK-NEXT: ret ptr %mem1
+ %ret = tail call ptr @memmove(ptr %mem1, ptr %mem2, i32 %size)
+ ret ptr %ret
}
-define i8* @test_no_simplify1(i8* %mem1, i8* %mem2, i32 %size) {
+define ptr @test_no_simplify1(ptr %mem1, ptr %mem2, i32 %size) {
; CHECK-LABEL: @test_no_simplify1(
-; CHECK-NEXT: %ret = musttail call i8* @memmove(i8* %mem1, i8* %mem2, i32 %size)
-; CHECK-NEXT: ret i8* %ret
- %ret = musttail call i8* @memmove(i8* %mem1, i8* %mem2, i32 %size)
- ret i8* %ret
+; CHECK-NEXT: %ret = musttail call ptr @memmove(ptr %mem1, ptr %mem2, i32 %size)
+; CHECK-NEXT: ret ptr %ret
+ %ret = musttail call ptr @memmove(ptr %mem1, ptr %mem2, i32 %size)
+ ret ptr %ret
}
-define i8* @test_no_incompatible_attr(i8* %mem1, i8* %mem2, i32 %size) {
+define ptr @test_no_incompatible_attr(ptr %mem1, ptr %mem2, i32 %size) {
; CHECK-LABEL: @test_no_incompatible_attr(
- %ret = call dereferenceable(1) i8* @memmove(i8* %mem1, i8* %mem2, i32 %size)
+ %ret = call dereferenceable(1) ptr @memmove(ptr %mem1, ptr %mem2, i32 %size)
; CHECK: call void @llvm.memmove
- ret i8* %ret
-; CHECK: ret i8* %mem1
+ ret ptr %ret
+; CHECK: ret ptr %mem1
}
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-declare i8 @memmove(i8*, i8*, i32)
+declare i8 @memmove(ptr, ptr, i32)
; Check that memmove functions with the wrong prototype aren't simplified.
-define i8 @test_no_simplify1(i8* %mem1, i8* %mem2, i32 %size) {
+define i8 @test_no_simplify1(ptr %mem1, ptr %mem2, i32 %size) {
; CHECK-LABEL: @test_no_simplify1(
- %ret = call i8 @memmove(i8* %mem1, i8* %mem2, i32 %size)
+ %ret = call i8 @memmove(ptr %mem1, ptr %mem2, i32 %size)
; CHECK: call i8 @memmove
ret i8 %ret
; CHECK: ret i8 %ret
}
define void @test_no_simplify() {
; CHECK-LABEL: @test_no_simplify(
- %dst = bitcast %struct.T1* @t1 to i8*
- %src = bitcast %struct.T2* @t2 to i8*
-; CHECK-NEXT: call i8* @__memmove_chk
- call i8* @__memmove_chk(i8* %dst, i8* %src, i64 1824)
+; CHECK-NEXT: call ptr @__memmove_chk
+ call ptr @__memmove_chk(ptr @t1, ptr @t2, i64 1824)
ret void
}
-declare i8* @__memmove_chk(i8*, i8*, i64)
+declare ptr @__memmove_chk(ptr, ptr, i64)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
-define i8* @memcpy_nonconst_n(i8* %d, i8* nocapture readonly %s, i64 %n) {
+define ptr @memcpy_nonconst_n(ptr %d, ptr nocapture readonly %s, i64 %n) {
; CHECK-LABEL: @memcpy_nonconst_n(
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[D:%.*]], i8* align 1 [[S:%.*]], i64 [[N:%.*]], i1 false)
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[D]], i64 [[N]]
-; CHECK-NEXT: ret i8* [[TMP1]]
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[D:%.*]], ptr align 1 [[S:%.*]], i64 [[N:%.*]], i1 false)
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[D]], i64 [[N]]
+; CHECK-NEXT: ret ptr [[TMP1]]
;
- %r = tail call i8* @mempcpy(i8* %d, i8* %s, i64 %n)
- ret i8* %r
+ %r = tail call ptr @mempcpy(ptr %d, ptr %s, i64 %n)
+ ret ptr %r
}
-define i8* @memcpy_nonconst_n_copy_attrs(i8* %d, i8* nocapture readonly %s, i64 %n) {
+define ptr @memcpy_nonconst_n_copy_attrs(ptr %d, ptr nocapture readonly %s, i64 %n) {
; CHECK-LABEL: @memcpy_nonconst_n_copy_attrs(
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 dereferenceable(16) [[D:%.*]], i8* align 1 [[S:%.*]], i64 [[N:%.*]], i1 false)
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[D]], i64 [[N]]
-; CHECK-NEXT: ret i8* [[TMP1]]
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 dereferenceable(16) [[D:%.*]], ptr align 1 [[S:%.*]], i64 [[N:%.*]], i1 false)
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[D]], i64 [[N]]
+; CHECK-NEXT: ret ptr [[TMP1]]
;
- %r = tail call i8* @mempcpy(i8* dereferenceable(16) %d, i8* %s, i64 %n)
- ret i8* %r
+ %r = tail call ptr @mempcpy(ptr dereferenceable(16) %d, ptr %s, i64 %n)
+ ret ptr %r
}
-define void @memcpy_nonconst_n_unused_retval(i8* %d, i8* nocapture readonly %s, i64 %n) {
+define void @memcpy_nonconst_n_unused_retval(ptr %d, ptr nocapture readonly %s, i64 %n) {
; CHECK-LABEL: @memcpy_nonconst_n_unused_retval(
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[D:%.*]], i8* align 1 [[S:%.*]], i64 [[N:%.*]], i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[D:%.*]], ptr align 1 [[S:%.*]], i64 [[N:%.*]], i1 false)
; CHECK-NEXT: ret void
;
- call i8* @mempcpy(i8* %d, i8* %s, i64 %n)
+ call ptr @mempcpy(ptr %d, ptr %s, i64 %n)
ret void
}
-define i8* @memcpy_small_const_n(i8* %d, i8* nocapture readonly %s) {
+define ptr @memcpy_small_const_n(ptr %d, ptr nocapture readonly %s) {
; CHECK-LABEL: @memcpy_small_const_n(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[S:%.*]] to i64*
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[D:%.*]] to i64*
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
-; CHECK-NEXT: store i64 [[TMP3]], i64* [[TMP2]], align 1
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, i8* [[D]], i64 8
-; CHECK-NEXT: ret i8* [[TMP4]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[S:%.*]], align 1
+; CHECK-NEXT: store i64 [[TMP3]], ptr [[D:%.*]], align 1
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[D]], i64 8
+; CHECK-NEXT: ret ptr [[TMP4]]
;
- %r = tail call i8* @mempcpy(i8* %d, i8* %s, i64 8)
- ret i8* %r
+ %r = tail call ptr @mempcpy(ptr %d, ptr %s, i64 8)
+ ret ptr %r
}
-define i8* @memcpy_big_const_n(i8* %d, i8* nocapture readonly %s) {
+define ptr @memcpy_big_const_n(ptr %d, ptr nocapture readonly %s) {
; CHECK-LABEL: @memcpy_big_const_n(
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 1 dereferenceable(1024) [[D:%.*]], i8* noundef nonnull align 1 dereferenceable(1024) [[S:%.*]], i64 1024, i1 false)
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[D]], i64 1024
-; CHECK-NEXT: ret i8* [[TMP1]]
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 1 dereferenceable(1024) [[D:%.*]], ptr noundef nonnull align 1 dereferenceable(1024) [[S:%.*]], i64 1024, i1 false)
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[D]], i64 1024
+; CHECK-NEXT: ret ptr [[TMP1]]
;
- %r = tail call i8* @mempcpy(i8* %d, i8* %s, i64 1024)
- ret i8* %r
+ %r = tail call ptr @mempcpy(ptr %d, ptr %s, i64 1024)
+ ret ptr %r
}
; The original call may have attributes that cannot propagate to memcpy.
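; Here, for example, dereferenceable(1) applies to the pointer returned by
; mempcpy; the llvm.memcpy intrinsic returns void, so the attribute has no
; place to go and is dropped.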
define i32 @PR48810() {
; CHECK-LABEL: @PR48810(
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 undef, i8* align 4294967296 null, i64 undef, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 undef, ptr align 4294967296 null, i64 undef, i1 false)
; CHECK-NEXT: ret i32 undef
;
- %r = call dereferenceable(1) i8* @mempcpy(i8* undef, i8* null, i64 undef)
+ %r = call dereferenceable(1) ptr @mempcpy(ptr undef, ptr null, i64 undef)
ret i32 undef
}
-define i8* @memcpy_no_simplify1(i8* %d, i8* nocapture readonly %s, i64 %n) {
+define ptr @memcpy_no_simplify1(ptr %d, ptr nocapture readonly %s, i64 %n) {
; CHECK-LABEL: @memcpy_no_simplify1(
-; CHECK-NEXT: %r = musttail call i8* @mempcpy(i8* %d, i8* %s, i64 %n)
-; CHECK-NEXT: ret i8* %r
+; CHECK-NEXT: %r = musttail call ptr @mempcpy(ptr %d, ptr %s, i64 %n)
+; CHECK-NEXT: ret ptr %r
;
- %r = musttail call i8* @mempcpy(i8* %d, i8* %s, i64 %n)
- ret i8* %r
+ %r = musttail call ptr @mempcpy(ptr %d, ptr %s, i64 %n)
+ ret ptr %r
}
-declare i8* @mempcpy(i8*, i8* nocapture readonly, i64)
+declare ptr @mempcpy(ptr, ptr nocapture readonly, i64)
; Verify that memrchr calls with an out-of-bounds size are not folded to
; null (they might be intercepted by sanitizers).
-declare i8* @memrchr(i8*, i32, i64)
+declare ptr @memrchr(ptr, i32, i64)
@ax = external global [0 x i8]
@ax1 = external global [1 x i8]
@a12345 = constant [5 x i8] c"\01\02\03\04\05"
; Do not fold memrchr(a12345, C, UINT32_MAX + 1LU) to null or to a12345
; as might happen if the size were to be truncated to int32_t.
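; (4294967296 is 2^32, i.e. UINT32_MAX + 1, and 4294967297 is UINT32_MAX + 2;
; truncated to 32 bits they would wrap to 0 and 1 respectively.)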
-define i8* @call_memrchr_a12345_c_ui32max_p1(i32 %C) {
+define ptr @call_memrchr_a12345_c_ui32max_p1(i32 %C) {
; CHECK-LABEL: @call_memrchr_a12345_c_ui32max_p1(
-; CHECK-NEXT: [[RET:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(4294967296) getelementptr inbounds ([5 x i8], [5 x i8]* @a12345, i64 0, i64 0), i32 [[C:%.*]], i64 4294967296)
-; CHECK-NEXT: ret i8* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = call ptr @memrchr(ptr noundef nonnull dereferenceable(4294967296) @a12345, i32 [[C:%.*]], i64 4294967296)
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ptr = getelementptr [5 x i8], [5 x i8]* @a12345, i64 0, i64 0
- %ret = call i8* @memrchr(i8* %ptr, i32 %C, i64 4294967296)
- ret i8* %ret
+ %ret = call ptr @memrchr(ptr @a12345, i32 %C, i64 4294967296)
+ ret ptr %ret
}
; Do not fold memrchr(ax1, C, UINT32_MAX + 2LU) to null or to *ax1 == 1.
-define i8* @call_memrchr_ax1_c_ui32max_p2(i32 %C) {
+define ptr @call_memrchr_ax1_c_ui32max_p2(i32 %C) {
; CHECK-LABEL: @call_memrchr_ax1_c_ui32max_p2(
-; CHECK-NEXT: [[RET:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(4294967297) getelementptr inbounds ([1 x i8], [1 x i8]* @ax1, i64 0, i64 0), i32 [[C:%.*]], i64 4294967297)
-; CHECK-NEXT: ret i8* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = call ptr @memrchr(ptr noundef nonnull dereferenceable(4294967297) @ax1, i32 [[C:%.*]], i64 4294967297)
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ptr = getelementptr [1 x i8], [1 x i8]* @ax1, i64 0, i64 0
- %ret = call i8* @memrchr(i8* %ptr, i32 %C, i64 4294967297)
- ret i8* %ret
+ %ret = call ptr @memrchr(ptr @ax1, i32 %C, i64 4294967297)
+ ret ptr %ret
}
; Do not fold memrchr(ax, C, UINT32_MAX + 2LU) to *ax == 1.
-define i8* @call_memrchr_ax_c_ui32max_p2(i32 %C) {
+define ptr @call_memrchr_ax_c_ui32max_p2(i32 %C) {
; CHECK-LABEL: @call_memrchr_ax_c_ui32max_p2(
-; CHECK-NEXT: [[RET:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(4294967297) getelementptr inbounds ([0 x i8], [0 x i8]* @ax, i64 0, i64 0), i32 [[C:%.*]], i64 4294967297)
-; CHECK-NEXT: ret i8* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = call ptr @memrchr(ptr noundef nonnull dereferenceable(4294967297) @ax, i32 [[C:%.*]], i64 4294967297)
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ptr = getelementptr [0 x i8], [0 x i8]* @ax, i64 0, i64 0
- %ret = call i8* @memrchr(i8* %ptr, i32 %C, i64 4294967297)
- ret i8* %ret
+ %ret = call ptr @memrchr(ptr @ax, i32 %C, i64 4294967297)
+ ret ptr %ret
}
; Do not fold memrchr(a12345, C, 6) to null.
-define i8* @call_memrchr_a12345_c_6(i32 %C) {
+define ptr @call_memrchr_a12345_c_6(i32 %C) {
; CHECK-LABEL: @call_memrchr_a12345_c_6(
-; CHECK-NEXT: [[RET:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(6) getelementptr inbounds ([5 x i8], [5 x i8]* @a12345, i64 0, i64 0), i32 [[C:%.*]], i64 6)
-; CHECK-NEXT: ret i8* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = call ptr @memrchr(ptr noundef nonnull dereferenceable(6) @a12345, i32 [[C:%.*]], i64 6)
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ptr = getelementptr [5 x i8], [5 x i8]* @a12345, i64 0, i64 0
- %ret = call i8* @memrchr(i8* %ptr, i32 %C, i64 6)
- ret i8* %ret
+ %ret = call ptr @memrchr(ptr @a12345, i32 %C, i64 6)
+ ret ptr %ret
}
; Do not fold memrchr(a12345, C, SIZE_MAX) to null.
-define i8* @call_memrchr_a12345_c_szmax(i32 %C) {
+define ptr @call_memrchr_a12345_c_szmax(i32 %C) {
; CHECK-LABEL: @call_memrchr_a12345_c_szmax(
-; CHECK-NEXT: [[RET:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(18446744073709551615) getelementptr inbounds ([5 x i8], [5 x i8]* @a12345, i64 0, i64 0), i32 [[C:%.*]], i64 -1)
-; CHECK-NEXT: ret i8* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = call ptr @memrchr(ptr noundef nonnull dereferenceable(18446744073709551615) @a12345, i32 [[C:%.*]], i64 -1)
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ptr = getelementptr [5 x i8], [5 x i8]* @a12345, i64 0, i64 0
- %ret = call i8* @memrchr(i8* %ptr, i32 %C, i64 18446744073709551615)
- ret i8* %ret
+ %ret = call ptr @memrchr(ptr @a12345, i32 %C, i64 18446744073709551615)
+ ret ptr %ret
}
; RUN: opt < %s -passes=instcombine -S -data-layout="E" | FileCheck %s --check-prefixes=BE
; RUN: opt < %s -passes=instcombine -S -data-layout="e" | FileCheck %s --check-prefixes=LE
-declare i8* @memrchr(i8*, i32, i64)
+declare ptr @memrchr(ptr, i32, i64)
; BE representation: { 'a', 'b', 'c', 'd', 'e', ..., 'p', 'a', 'b', 'c', 'd' }
; LE representation: { 'd', 'c', 'b', 'a', 'h', ..., 'm', 'd', 'c', 'b', 'a' }
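; (Each i32 element of @a, whose definition is elided here, packs four
; characters, so the in-memory byte order within an element reverses between
; big- and little-endian targets; e.g. an element with the value 0x61626364
; stores the bytes 'a', 'b', 'c', 'd' on BE and 'd', 'c', 'b', 'a' on LE.)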
; Fold memrchr(a, C, 16) for C in ['a', 'd'] U ['o', 'q'].
-define void @fold_memrchr_a_16(i64* %pcmp) {
+define void @fold_memrchr_a_16(ptr %pcmp) {
; BE-LABEL: @fold_memrchr_a_16(
-; BE-NEXT: store i64 0, i64* [[PCMP:%.*]], align 4
-; BE-NEXT: [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; BE-NEXT: store i64 1, i64* [[PSTOR1]], align 4
-; BE-NEXT: [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; BE-NEXT: store i64 2, i64* [[PSTOR2]], align 4
-; BE-NEXT: [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; BE-NEXT: store i64 3, i64* [[PSTOR3]], align 4
-; BE-NEXT: [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; BE-NEXT: store i64 13, i64* [[PSTOR4]], align 4
-; BE-NEXT: [[PSTOR6:%.*]] = getelementptr i64, i64* [[PCMP]], i64 6
-; BE-NEXT: store i64 14, i64* [[PSTOR6]], align 4
-; BE-NEXT: [[PSTOR7:%.*]] = getelementptr i64, i64* [[PCMP]], i64 7
-; BE-NEXT: store i64 15, i64* [[PSTOR7]], align 4
-; BE-NEXT: [[PSTOR8:%.*]] = getelementptr i64, i64* [[PCMP]], i64 8
-; BE-NEXT: store i64 0, i64* [[PSTOR8]], align 4
+; BE-NEXT: store i64 0, ptr [[PCMP:%.*]], align 4
+; BE-NEXT: [[PSTOR1:%.*]] = getelementptr i64, ptr [[PCMP]], i64 1
+; BE-NEXT: store i64 1, ptr [[PSTOR1]], align 4
+; BE-NEXT: [[PSTOR2:%.*]] = getelementptr i64, ptr [[PCMP]], i64 2
+; BE-NEXT: store i64 2, ptr [[PSTOR2]], align 4
+; BE-NEXT: [[PSTOR3:%.*]] = getelementptr i64, ptr [[PCMP]], i64 3
+; BE-NEXT: store i64 3, ptr [[PSTOR3]], align 4
+; BE-NEXT: [[PSTOR4:%.*]] = getelementptr i64, ptr [[PCMP]], i64 4
+; BE-NEXT: store i64 13, ptr [[PSTOR4]], align 4
+; BE-NEXT: [[PSTOR6:%.*]] = getelementptr i64, ptr [[PCMP]], i64 6
+; BE-NEXT: store i64 14, ptr [[PSTOR6]], align 4
+; BE-NEXT: [[PSTOR7:%.*]] = getelementptr i64, ptr [[PCMP]], i64 7
+; BE-NEXT: store i64 15, ptr [[PSTOR7]], align 4
+; BE-NEXT: [[PSTOR8:%.*]] = getelementptr i64, ptr [[PCMP]], i64 8
+; BE-NEXT: store i64 0, ptr [[PSTOR8]], align 4
; BE-NEXT: ret void
;
; LE-LABEL: @fold_memrchr_a_16(
-; LE-NEXT: store i64 3, i64* [[PCMP:%.*]], align 4
-; LE-NEXT: [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; LE-NEXT: store i64 2, i64* [[PSTOR1]], align 4
-; LE-NEXT: [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; LE-NEXT: store i64 1, i64* [[PSTOR2]], align 4
-; LE-NEXT: [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; LE-NEXT: store i64 0, i64* [[PSTOR3]], align 4
-; LE-NEXT: [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; LE-NEXT: store i64 14, i64* [[PSTOR4]], align 4
-; LE-NEXT: [[PSTOR6:%.*]] = getelementptr i64, i64* [[PCMP]], i64 6
-; LE-NEXT: store i64 13, i64* [[PSTOR6]], align 4
-; LE-NEXT: [[PSTOR7:%.*]] = getelementptr i64, i64* [[PCMP]], i64 7
-; LE-NEXT: store i64 12, i64* [[PSTOR7]], align 4
-; LE-NEXT: [[PSTOR8:%.*]] = getelementptr i64, i64* [[PCMP]], i64 8
-; LE-NEXT: store i64 0, i64* [[PSTOR8]], align 4
+; LE-NEXT: store i64 3, ptr [[PCMP:%.*]], align 4
+; LE-NEXT: [[PSTOR1:%.*]] = getelementptr i64, ptr [[PCMP]], i64 1
+; LE-NEXT: store i64 2, ptr [[PSTOR1]], align 4
+; LE-NEXT: [[PSTOR2:%.*]] = getelementptr i64, ptr [[PCMP]], i64 2
+; LE-NEXT: store i64 1, ptr [[PSTOR2]], align 4
+; LE-NEXT: [[PSTOR3:%.*]] = getelementptr i64, ptr [[PCMP]], i64 3
+; LE-NEXT: store i64 0, ptr [[PSTOR3]], align 4
+; LE-NEXT: [[PSTOR4:%.*]] = getelementptr i64, ptr [[PCMP]], i64 4
+; LE-NEXT: store i64 14, ptr [[PSTOR4]], align 4
+; LE-NEXT: [[PSTOR6:%.*]] = getelementptr i64, ptr [[PCMP]], i64 6
+; LE-NEXT: store i64 13, ptr [[PSTOR6]], align 4
+; LE-NEXT: [[PSTOR7:%.*]] = getelementptr i64, ptr [[PCMP]], i64 7
+; LE-NEXT: store i64 12, ptr [[PSTOR7]], align 4
+; LE-NEXT: [[PSTOR8:%.*]] = getelementptr i64, ptr [[PCMP]], i64 8
+; LE-NEXT: store i64 0, ptr [[PSTOR8]], align 4
; LE-NEXT: ret void
;
- %p0 = getelementptr [5 x i32], [5 x i32]* @a, i64 0, i64 0
- %p1 = bitcast i32* %p0 to i8*
- %ip0 = ptrtoint [5 x i32]* @a to i64
+ %ip0 = ptrtoint ptr @a to i64
; Fold memrchr(a, 'a', 16) - a to 0 (3 in LE).
- %pa = call i8* @memrchr(i8* %p1, i32 97, i64 16)
- %ipa = ptrtoint i8* %pa to i64
+ %pa = call ptr @memrchr(ptr @a, i32 97, i64 16)
+ %ipa = ptrtoint ptr %pa to i64
%offa = sub i64 %ipa, %ip0
- %pstor0 = getelementptr i64, i64* %pcmp, i64 0
- store i64 %offa, i64* %pstor0
+ store i64 %offa, ptr %pcmp
; Fold memrchr(a, 'b', 16) - a to 1 (2 in LE)
- %pb = call i8* @memrchr(i8* %p1, i32 98, i64 16)
- %ipb = ptrtoint i8* %pb to i64
+ %pb = call ptr @memrchr(ptr @a, i32 98, i64 16)
+ %ipb = ptrtoint ptr %pb to i64
%offb = sub i64 %ipb, %ip0
- %pstor1 = getelementptr i64, i64* %pcmp, i64 1
- store i64 %offb, i64* %pstor1
+ %pstor1 = getelementptr i64, ptr %pcmp, i64 1
+ store i64 %offb, ptr %pstor1
; Fold memrchr(a, 'c', 16) - a to 2 (1 in LE).
- %pc = call i8* @memrchr(i8* %p1, i32 99, i64 16)
- %ipc = ptrtoint i8* %pc to i64
+ %pc = call ptr @memrchr(ptr @a, i32 99, i64 16)
+ %ipc = ptrtoint ptr %pc to i64
%offc = sub i64 %ipc, %ip0
- %pstor2 = getelementptr i64, i64* %pcmp, i64 2
- store i64 %offc, i64* %pstor2
+ %pstor2 = getelementptr i64, ptr %pcmp, i64 2
+ store i64 %offc, ptr %pstor2
; Fold memrchr(a, 'd', 16) - a to 3 (0 in LE).
- %pd = call i8* @memrchr(i8* %p1, i32 100, i64 16)
- %ipd = ptrtoint i8* %pd to i64
+ %pd = call ptr @memrchr(ptr @a, i32 100, i64 16)
+ %ipd = ptrtoint ptr %pd to i64
%offd = sub i64 %ipd, %ip0
- %pstor3 = getelementptr i64, i64* %pcmp, i64 3
- store i64 %offd, i64* %pstor3
+ %pstor3 = getelementptr i64, ptr %pcmp, i64 3
+ store i64 %offd, ptr %pstor3
; Fold memrchr(a, 'n', 16) - a to 13 (14 in LE).
- %pn = call i8* @memrchr(i8* %p1, i32 110, i64 16)
- %ipn = ptrtoint i8* %pn to i64
+ %pn = call ptr @memrchr(ptr @a, i32 110, i64 16)
+ %ipn = ptrtoint ptr %pn to i64
%offn = sub i64 %ipn, %ip0
- %pstor4 = getelementptr i64, i64* %pcmp, i64 4
- store i64 %offn, i64* %pstor4
+ %pstor4 = getelementptr i64, ptr %pcmp, i64 4
+ store i64 %offn, ptr %pstor4
; Fold memrchr(a, 'o', 16) - a to 14 (13 in LE).
- %po = call i8* @memrchr(i8* %p1, i32 111, i64 16)
- %ipo = ptrtoint i8* %po to i64
+ %po = call ptr @memrchr(ptr @a, i32 111, i64 16)
+ %ipo = ptrtoint ptr %po to i64
%offo = sub i64 %ipo, %ip0
- %pstor6 = getelementptr i64, i64* %pcmp, i64 6
- store i64 %offo, i64* %pstor6
+ %pstor6 = getelementptr i64, ptr %pcmp, i64 6
+ store i64 %offo, ptr %pstor6
; Fold memrchr(a, 'p', 16) - a to 15 (12 in LE).
- %pp = call i8* @memrchr(i8* %p1, i32 112, i64 16)
- %ipp = ptrtoint i8* %pp to i64
+ %pp = call ptr @memrchr(ptr @a, i32 112, i64 16)
+ %ipp = ptrtoint ptr %pp to i64
%offp = sub i64 %ipp, %ip0
- %pstor7 = getelementptr i64, i64* %pcmp, i64 7
- store i64 %offp, i64* %pstor7
+ %pstor7 = getelementptr i64, ptr %pcmp, i64 7
+ store i64 %offp, ptr %pstor7
; Fold memrchr(a, 'q', 16) to null in both BE and LE.
- %pq = call i8* @memrchr(i8* %p1, i32 113, i64 16)
- %ipq = ptrtoint i8* %pq to i64
- %pstor8 = getelementptr i64, i64* %pcmp, i64 8
- store i64 %ipq, i64* %pstor8
+ %pq = call ptr @memrchr(ptr @a, i32 113, i64 16)
+ %ipq = ptrtoint ptr %pq to i64
+ %pstor8 = getelementptr i64, ptr %pcmp, i64 8
+ store i64 %ipq, ptr %pstor8
ret void
}
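; The BE/LE differences above come from @a being an array of i32 elements:
; memrchr scans the underlying bytes, and the byte order within each i32
; depends on the target's endianness.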
; Fold memrchr(a + 1, C, 12) for C in ['e', 'h'] U ['a', 'd'].
-define void @fold_memrchr_a_p1_16(i64* %pcmp) {
+define void @fold_memrchr_a_p1_16(ptr %pcmp) {
; BE-LABEL: @fold_memrchr_a_p1_16(
-; BE-NEXT: store i64 0, i64* [[PCMP:%.*]], align 4
-; BE-NEXT: [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; BE-NEXT: store i64 1, i64* [[PSTOR1]], align 4
-; BE-NEXT: [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; BE-NEXT: store i64 2, i64* [[PSTOR2]], align 4
-; BE-NEXT: [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; BE-NEXT: store i64 3, i64* [[PSTOR3]], align 4
-; BE-NEXT: [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; BE-NEXT: store i64 0, i64* [[PSTOR4]], align 4
-; BE-NEXT: [[PSTOR5:%.*]] = getelementptr i64, i64* [[PCMP]], i64 5
-; BE-NEXT: store i64 0, i64* [[PSTOR5]], align 4
+; BE-NEXT: store i64 0, ptr [[PCMP:%.*]], align 4
+; BE-NEXT: [[PSTOR1:%.*]] = getelementptr i64, ptr [[PCMP]], i64 1
+; BE-NEXT: store i64 1, ptr [[PSTOR1]], align 4
+; BE-NEXT: [[PSTOR2:%.*]] = getelementptr i64, ptr [[PCMP]], i64 2
+; BE-NEXT: store i64 2, ptr [[PSTOR2]], align 4
+; BE-NEXT: [[PSTOR3:%.*]] = getelementptr i64, ptr [[PCMP]], i64 3
+; BE-NEXT: store i64 3, ptr [[PSTOR3]], align 4
+; BE-NEXT: [[PSTOR4:%.*]] = getelementptr i64, ptr [[PCMP]], i64 4
+; BE-NEXT: store i64 0, ptr [[PSTOR4]], align 4
+; BE-NEXT: [[PSTOR5:%.*]] = getelementptr i64, ptr [[PCMP]], i64 5
+; BE-NEXT: store i64 0, ptr [[PSTOR5]], align 4
; BE-NEXT: ret void
;
; LE-LABEL: @fold_memrchr_a_p1_16(
-; LE-NEXT: store i64 3, i64* [[PCMP:%.*]], align 4
-; LE-NEXT: [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; LE-NEXT: store i64 2, i64* [[PSTOR1]], align 4
-; LE-NEXT: [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; LE-NEXT: store i64 1, i64* [[PSTOR2]], align 4
-; LE-NEXT: [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; LE-NEXT: store i64 0, i64* [[PSTOR3]], align 4
-; LE-NEXT: [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; LE-NEXT: store i64 0, i64* [[PSTOR4]], align 4
-; LE-NEXT: [[PSTOR5:%.*]] = getelementptr i64, i64* [[PCMP]], i64 5
-; LE-NEXT: store i64 0, i64* [[PSTOR5]], align 4
+; LE-NEXT: store i64 3, ptr [[PCMP:%.*]], align 4
+; LE-NEXT: [[PSTOR1:%.*]] = getelementptr i64, ptr [[PCMP]], i64 1
+; LE-NEXT: store i64 2, ptr [[PSTOR1]], align 4
+; LE-NEXT: [[PSTOR2:%.*]] = getelementptr i64, ptr [[PCMP]], i64 2
+; LE-NEXT: store i64 1, ptr [[PSTOR2]], align 4
+; LE-NEXT: [[PSTOR3:%.*]] = getelementptr i64, ptr [[PCMP]], i64 3
+; LE-NEXT: store i64 0, ptr [[PSTOR3]], align 4
+; LE-NEXT: [[PSTOR4:%.*]] = getelementptr i64, ptr [[PCMP]], i64 4
+; LE-NEXT: store i64 0, ptr [[PSTOR4]], align 4
+; LE-NEXT: [[PSTOR5:%.*]] = getelementptr i64, ptr [[PCMP]], i64 5
+; LE-NEXT: store i64 0, ptr [[PSTOR5]], align 4
; LE-NEXT: ret void
;
- %p0 = getelementptr [5 x i32], [5 x i32]* @a, i64 0, i64 1
- %p1 = bitcast i32* %p0 to i8*
- %ip0 = ptrtoint i8* %p1 to i64
+ %p0 = getelementptr [5 x i32], ptr @a, i64 0, i64 1
+ %ip0 = ptrtoint ptr %p0 to i64
; Fold memrchr(a + 1, 'e', 12) - (a + 1) to 0 (3 in LE).
- %pe = call i8* @memrchr(i8* %p1, i32 101, i64 12)
- %ipe = ptrtoint i8* %pe to i64
+ %pe = call ptr @memrchr(ptr %p0, i32 101, i64 12)
+ %ipe = ptrtoint ptr %pe to i64
%offe = sub i64 %ipe, %ip0
- %pstor0 = getelementptr i64, i64* %pcmp, i64 0
- store i64 %offe, i64* %pstor0
+ store i64 %offe, ptr %pcmp
; Fold memrchr(a + 1, 'f', 12) - (a + 1) to 1 (2 in LE).
- %pf = call i8* @memrchr(i8* %p1, i32 102, i64 12)
- %ipf = ptrtoint i8* %pf to i64
+ %pf = call ptr @memrchr(ptr %p0, i32 102, i64 12)
+ %ipf = ptrtoint ptr %pf to i64
%offf = sub i64 %ipf, %ip0
- %pstor1 = getelementptr i64, i64* %pcmp, i64 1
- store i64 %offf, i64* %pstor1
+ %pstor1 = getelementptr i64, ptr %pcmp, i64 1
+ store i64 %offf, ptr %pstor1
; Fold memrchr(a + 1, 'g', 12) - (a + 1) to 2 (1 in LE).
- %pg = call i8* @memrchr(i8* %p1, i32 103, i64 12)
- %ipg = ptrtoint i8* %pg to i64
+ %pg = call ptr @memrchr(ptr %p0, i32 103, i64 12)
+ %ipg = ptrtoint ptr %pg to i64
%offg = sub i64 %ipg, %ip0
- %pstor2 = getelementptr i64, i64* %pcmp, i64 2
- store i64 %offg, i64* %pstor2
+ %pstor2 = getelementptr i64, ptr %pcmp, i64 2
+ store i64 %offg, ptr %pstor2
; Fold memrchr(a + 1, 'h', 12) - (a + 1) to 3 (0 in LE).
- %ph = call i8* @memrchr(i8* %p1, i32 104, i64 12)
- %iph = ptrtoint i8* %ph to i64
+ %ph = call ptr @memrchr(ptr %p0, i32 104, i64 12)
+ %iph = ptrtoint ptr %ph to i64
%offh = sub i64 %iph, %ip0
- %pstor3 = getelementptr i64, i64* %pcmp, i64 3
- store i64 %offh, i64* %pstor3
+ %pstor3 = getelementptr i64, ptr %pcmp, i64 3
+ store i64 %offh, ptr %pstor3
; Fold memrchr(a + 1, 'a', 12) to null in both BE and LE.
- %pa = call i8* @memrchr(i8* %p1, i32 97, i64 12)
- %ipa = ptrtoint i8* %pa to i64
- %pstor4 = getelementptr i64, i64* %pcmp, i64 4
- store i64 %ipa, i64* %pstor4
+ %pa = call ptr @memrchr(ptr %p0, i32 97, i64 12)
+ %ipa = ptrtoint ptr %pa to i64
+ %pstor4 = getelementptr i64, ptr %pcmp, i64 4
+ store i64 %ipa, ptr %pstor4
; Fold memrchr(a + 1, 'd', 12) to null in both BE and LE.
- %pd = call i8* @memrchr(i8* %p1, i32 100, i64 12)
- %ipd = ptrtoint i8* %pd to i64
- %pstor5 = getelementptr i64, i64* %pcmp, i64 5
- store i64 %ipd, i64* %pstor5
+ %pd = call ptr @memrchr(ptr %p0, i32 100, i64 12)
+ %ipd = ptrtoint ptr %pd to i64
+ %pstor5 = getelementptr i64, ptr %pcmp, i64 5
+ store i64 %ipd, ptr %pstor5
ret void
}
; Fold memrchr(a, C, 20) for C in ['a', 'e'].
-define void @fold_memrchr_a_20(i64* %pcmp) {
+define void @fold_memrchr_a_20(ptr %pcmp) {
; BE-LABEL: @fold_memrchr_a_20(
-; BE-NEXT: store i64 16, i64* [[PCMP:%.*]], align 4
-; BE-NEXT: [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; BE-NEXT: store i64 17, i64* [[PSTOR1]], align 4
-; BE-NEXT: [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; BE-NEXT: store i64 18, i64* [[PSTOR2]], align 4
-; BE-NEXT: [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; BE-NEXT: store i64 19, i64* [[PSTOR3]], align 4
-; BE-NEXT: [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; BE-NEXT: store i64 4, i64* [[PSTOR4]], align 4
+; BE-NEXT: store i64 16, ptr [[PCMP:%.*]], align 4
+; BE-NEXT: [[PSTOR1:%.*]] = getelementptr i64, ptr [[PCMP]], i64 1
+; BE-NEXT: store i64 17, ptr [[PSTOR1]], align 4
+; BE-NEXT: [[PSTOR2:%.*]] = getelementptr i64, ptr [[PCMP]], i64 2
+; BE-NEXT: store i64 18, ptr [[PSTOR2]], align 4
+; BE-NEXT: [[PSTOR3:%.*]] = getelementptr i64, ptr [[PCMP]], i64 3
+; BE-NEXT: store i64 19, ptr [[PSTOR3]], align 4
+; BE-NEXT: [[PSTOR4:%.*]] = getelementptr i64, ptr [[PCMP]], i64 4
+; BE-NEXT: store i64 4, ptr [[PSTOR4]], align 4
; BE-NEXT: ret void
;
; LE-LABEL: @fold_memrchr_a_20(
-; LE-NEXT: store i64 19, i64* [[PCMP:%.*]], align 4
-; LE-NEXT: [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; LE-NEXT: store i64 18, i64* [[PSTOR1]], align 4
-; LE-NEXT: [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; LE-NEXT: store i64 17, i64* [[PSTOR2]], align 4
-; LE-NEXT: [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; LE-NEXT: store i64 16, i64* [[PSTOR3]], align 4
-; LE-NEXT: [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; LE-NEXT: store i64 7, i64* [[PSTOR4]], align 4
+; LE-NEXT: store i64 19, ptr [[PCMP:%.*]], align 4
+; LE-NEXT: [[PSTOR1:%.*]] = getelementptr i64, ptr [[PCMP]], i64 1
+; LE-NEXT: store i64 18, ptr [[PSTOR1]], align 4
+; LE-NEXT: [[PSTOR2:%.*]] = getelementptr i64, ptr [[PCMP]], i64 2
+; LE-NEXT: store i64 17, ptr [[PSTOR2]], align 4
+; LE-NEXT: [[PSTOR3:%.*]] = getelementptr i64, ptr [[PCMP]], i64 3
+; LE-NEXT: store i64 16, ptr [[PSTOR3]], align 4
+; LE-NEXT: [[PSTOR4:%.*]] = getelementptr i64, ptr [[PCMP]], i64 4
+; LE-NEXT: store i64 7, ptr [[PSTOR4]], align 4
; LE-NEXT: ret void
;
- %p0 = getelementptr [5 x i32], [5 x i32]* @a, i64 0, i64 0
- %p1 = bitcast i32* %p0 to i8*
- %ip0 = ptrtoint i8* %p1 to i64
+ %ip0 = ptrtoint ptr @a to i64
; Fold memrchr(a, 'a', 20) - a to 16 (19 in LE).
- %pa = call i8* @memrchr(i8* %p1, i32 97, i64 20)
- %ipa = ptrtoint i8* %pa to i64
+ %pa = call ptr @memrchr(ptr @a, i32 97, i64 20)
+ %ipa = ptrtoint ptr %pa to i64
%offa = sub i64 %ipa, %ip0
- %pstor0 = getelementptr i64, i64* %pcmp, i64 0
- store i64 %offa, i64* %pstor0
+ store i64 %offa, ptr %pcmp
; Fold memrchr(a, 'b', 20) - a to 17 (18 in LE).
- %pb = call i8* @memrchr(i8* %p1, i32 98, i64 20)
- %ipb = ptrtoint i8* %pb to i64
+ %pb = call ptr @memrchr(ptr @a, i32 98, i64 20)
+ %ipb = ptrtoint ptr %pb to i64
%offb = sub i64 %ipb, %ip0
- %pstor1 = getelementptr i64, i64* %pcmp, i64 1
- store i64 %offb, i64* %pstor1
+ %pstor1 = getelementptr i64, ptr %pcmp, i64 1
+ store i64 %offb, ptr %pstor1
; Fold memrchr(a, 'c', 20) - a to 18 (17 in LE).
- %pc = call i8* @memrchr(i8* %p1, i32 99, i64 20)
- %ipc = ptrtoint i8* %pc to i64
+ %pc = call ptr @memrchr(ptr @a, i32 99, i64 20)
+ %ipc = ptrtoint ptr %pc to i64
%offc = sub i64 %ipc, %ip0
- %pstor2 = getelementptr i64, i64* %pcmp, i64 2
- store i64 %offc, i64* %pstor2
+ %pstor2 = getelementptr i64, ptr %pcmp, i64 2
+ store i64 %offc, ptr %pstor2
; Fold memrchr(a, 'd', 20) - a to 19 (16 in LE).
- %pd = call i8* @memrchr(i8* %p1, i32 100, i64 20)
- %ipd = ptrtoint i8* %pd to i64
+ %pd = call ptr @memrchr(ptr @a, i32 100, i64 20)
+ %ipd = ptrtoint ptr %pd to i64
%offd = sub i64 %ipd, %ip0
- %pstor3 = getelementptr i64, i64* %pcmp, i64 3
- store i64 %offd, i64* %pstor3
+ %pstor3 = getelementptr i64, ptr %pcmp, i64 3
+ store i64 %offd, ptr %pstor3
; Fold memrchr(a, 'e', 20) - a to 4 (7 in LE).
- %pe = call i8* @memrchr(i8* %p1, i32 101, i64 20)
- %ipe = ptrtoint i8* %pe to i64
+ %pe = call ptr @memrchr(ptr @a, i32 101, i64 20)
+ %ipe = ptrtoint ptr %pe to i64
%offe = sub i64 %ipe, %ip0
- %pstor4 = getelementptr i64, i64* %pcmp, i64 4
- store i64 %offe, i64* %pstor4
+ %pstor4 = getelementptr i64, ptr %pcmp, i64 4
+ store i64 %offe, ptr %pstor4
ret void
}
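; With n == 20 the search covers all five i32 elements of @a; the CHECK
; values show the last element repeats the bytes 'a'..'d', so the rightmost
; matches land at offsets 16..19, while 'e' still occurs only in the second
; element (offset 4 in BE, 7 in LE).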
; Verify that memrchr calls with past-the-end pointers used in equality
; expressions don't cause trouble and either are folded when they might
; be valid or not when they're provably undefined.
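; (With a constant nonzero size the access past the end of @a5 is provably
; undefined and the call is left alone; with a variable size the only
; defined case is n == 0, where memrchr returns null, so the equality tests
; against null and against @a5 fold to true and false, respectively.)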
-declare i8* @memrchr(i8*, i32, i64)
+declare ptr @memrchr(ptr, i32, i64)
@a5 = constant [5 x i8] c"12345"
; CHECK-LABEL: @call_memrchr_ap5_c_1_eq_a(
; CHECK-NEXT: ret i1
;
- %pap5 = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 5
- %qap5 = getelementptr [5 x i8], [5 x i8]* @a5, i32 1, i32 0
- %q = call i8* @memrchr(i8* %pap5, i32 %c, i64 1)
- %cmp = icmp eq i8* %q, %qap5
+ %pap5 = getelementptr [5 x i8], ptr @a5, i32 0, i32 5
+ %qap5 = getelementptr [5 x i8], ptr @a5, i32 1, i32 0
+ %q = call ptr @memrchr(ptr %pap5, i32 %c, i64 1)
+ %cmp = icmp eq ptr %q, %qap5
ret i1 %cmp
}
; CHECK-LABEL: @call_memrchr_ap5_c_5_eq_a(
; CHECK-NEXT: ret i1
;
- %pap5 = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 5
- %qap5 = getelementptr [5 x i8], [5 x i8]* @a5, i32 1, i32 0
- %q = call i8* @memrchr(i8* %pap5, i32 %c, i64 5)
- %cmp = icmp eq i8* %q, %qap5
+ %pap5 = getelementptr [5 x i8], ptr @a5, i32 0, i32 5
+ %qap5 = getelementptr [5 x i8], ptr @a5, i32 1, i32 0
+ %q = call ptr @memrchr(ptr %pap5, i32 %c, i64 5)
+ %cmp = icmp eq ptr %q, %qap5
ret i1 %cmp
}
; CHECK-LABEL: @fold_memrchr_ap5_c_n_eq_a(
; CHECK-NEXT: ret i1 false
;
- %pa = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 0
- %pap5 = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 5
- %q = call i8* @memrchr(i8* %pap5, i32 %c, i64 %n)
- %cmp = icmp eq i8* %q, %pa
+ %pap5 = getelementptr [5 x i8], ptr @a5, i32 0, i32 5
+ %q = call ptr @memrchr(ptr %pap5, i32 %c, i64 %n)
+ %cmp = icmp eq ptr %q, @a5
ret i1 %cmp
}
; CHECK-LABEL: @fold_memrchr_ap5_c_n_eqz(
; CHECK-NEXT: ret i1 true
;
- %p = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 5
- %q = call i8* @memrchr(i8* %p, i32 %c, i64 %n)
- %cmp = icmp eq i8* %q, null
+ %p = getelementptr [5 x i8], ptr @a5, i32 0, i32 5
+ %q = call ptr @memrchr(ptr %p, i32 %c, i64 %n)
+ %cmp = icmp eq ptr %q, null
ret i1 %cmp
}
; CHECK-LABEL: @fold_memrchr_a_nul_n_eqz(
; CHECK-NEXT: ret i1 true
;
- %p = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 5
- %q = call i8* @memrchr(i8* %p, i32 0, i64 %n)
- %cmp = icmp eq i8* %q, null
+ %p = getelementptr [5 x i8], ptr @a5, i32 0, i32 5
+ %q = call ptr @memrchr(ptr %p, i32 0, i64 %n)
+ %cmp = icmp eq ptr %q, null
ret i1 %cmp
}
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-declare i8* @memrchr(i8*, i32, i32)
+declare ptr @memrchr(ptr, i32, i32)
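; None of the calls below are simplified, not even the n == 0 case: the
; declaration's i32 size argument presumably does not match size_t here, so
; the calls are not recognized as the libc memrchr and instcombine only
; preserves or infers argument attributes.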
-define i8* @test1(i8* %str, i32 %c, i32 %n) {
+define ptr @test1(ptr %str, i32 %c, i32 %n) {
; CHECK-LABEL: @test1(
-; CHECK-NEXT: [[RET:%.*]] = call i8* @memrchr(i8* [[STR:%.*]], i32 [[C:%.*]], i32 [[N:%.*]])
-; CHECK-NEXT: ret i8* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = call ptr @memrchr(ptr [[STR:%.*]], i32 [[C:%.*]], i32 [[N:%.*]])
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ret = call i8* @memrchr(i8* %str, i32 %c, i32 %n)
- ret i8* %ret
+ %ret = call ptr @memrchr(ptr %str, i32 %c, i32 %n)
+ ret ptr %ret
}
-define i8* @test2(i8* %str, i32 %c, i32 %n) {
+define ptr @test2(ptr %str, i32 %c, i32 %n) {
; CHECK-LABEL: @test2(
-; CHECK-NEXT: [[RET:%.*]] = call i8* @memrchr(i8* nonnull [[STR:%.*]], i32 [[C:%.*]], i32 [[N:%.*]])
-; CHECK-NEXT: ret i8* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = call ptr @memrchr(ptr nonnull [[STR:%.*]], i32 [[C:%.*]], i32 [[N:%.*]])
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ret = call i8* @memrchr(i8* nonnull %str, i32 %c, i32 %n)
- ret i8* %ret
+ %ret = call ptr @memrchr(ptr nonnull %str, i32 %c, i32 %n)
+ ret ptr %ret
}
-define i8* @test3(i8* %str, i32 %c) {
+define ptr @test3(ptr %str, i32 %c) {
; CHECK-LABEL: @test3(
-; CHECK-NEXT: [[RET:%.*]] = call i8* @memrchr(i8* [[STR:%.*]], i32 [[C:%.*]], i32 5)
-; CHECK-NEXT: ret i8* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = call ptr @memrchr(ptr [[STR:%.*]], i32 [[C:%.*]], i32 5)
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ret = call i8* @memrchr(i8* %str, i32 %c, i32 5)
- ret i8* %ret
+ %ret = call ptr @memrchr(ptr %str, i32 %c, i32 5)
+ ret ptr %ret
}
-define i8* @test4(i8* %str, i32 %c) null_pointer_is_valid {
+define ptr @test4(ptr %str, i32 %c) null_pointer_is_valid {
; CHECK-LABEL: @test4(
-; CHECK-NEXT: [[RET:%.*]] = call i8* @memrchr(i8* [[STR:%.*]], i32 [[C:%.*]], i32 5)
-; CHECK-NEXT: ret i8* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = call ptr @memrchr(ptr [[STR:%.*]], i32 [[C:%.*]], i32 5)
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ret = call i8* @memrchr(i8* %str, i32 %c, i32 5)
- ret i8* %ret
+ %ret = call ptr @memrchr(ptr %str, i32 %c, i32 5)
+ ret ptr %ret
}
-define i8* @test5(i8* %str, i32 %c) {
+define ptr @test5(ptr %str, i32 %c) {
; CHECK-LABEL: @test5(
-; CHECK-NEXT: [[RET:%.*]] = call i8* @memrchr(i8* [[STR:%.*]], i32 [[C:%.*]], i32 0)
-; CHECK-NEXT: ret i8* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = call ptr @memrchr(ptr [[STR:%.*]], i32 [[C:%.*]], i32 0)
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ret = call i8* @memrchr(i8* %str, i32 %c, i32 0)
- ret i8* %ret
+ %ret = call ptr @memrchr(ptr %str, i32 %c, i32 0)
+ ret ptr %ret
}
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-declare i8* @memset(i8*, i32, i32)
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i32, i1)
-declare noalias i8* @malloc(i32) #1
+declare ptr @memset(ptr, i32, i32)
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i32, i1)
+declare noalias ptr @malloc(i32) #1
; Check memset(mem1, val, size) -> llvm.memset(mem1, val, size, 1).
-define i8* @test_simplify1(i8* %mem, i32 %val, i32 %size) {
+define ptr @test_simplify1(ptr %mem, i32 %val, i32 %size) {
; CHECK-LABEL: @test_simplify1(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[VAL:%.*]] to i8
-; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 1 [[MEM:%.*]], i8 [[TMP1]], i32 [[SIZE:%.*]], i1 false)
-; CHECK-NEXT: ret i8* [[MEM]]
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 1 [[MEM:%.*]], i8 [[TMP1]], i32 [[SIZE:%.*]], i1 false)
+; CHECK-NEXT: ret ptr [[MEM]]
;
- %ret = call i8* @memset(i8* %mem, i32 %val, i32 %size)
- ret i8* %ret
+ %ret = call ptr @memset(ptr %mem, i32 %val, i32 %size)
+ ret ptr %ret
}
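; memset() returns its destination pointer, so the libcall is replaced by
; the void llvm.memset intrinsic (with %val truncated to i8) and %mem is
; returned directly.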
-define i8* @test_simplify1_tail(i8* %mem, i32 %val, i32 %size) {
+define ptr @test_simplify1_tail(ptr %mem, i32 %val, i32 %size) {
; CHECK-LABEL: @test_simplify1_tail(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[VAL:%.*]] to i8
-; CHECK-NEXT: tail call void @llvm.memset.p0i8.i32(i8* align 1 [[MEM:%.*]], i8 [[TMP1]], i32 [[SIZE:%.*]], i1 false)
-; CHECK-NEXT: ret i8* [[MEM]]
+; CHECK-NEXT: tail call void @llvm.memset.p0.i32(ptr align 1 [[MEM:%.*]], i8 [[TMP1]], i32 [[SIZE:%.*]], i1 false)
+; CHECK-NEXT: ret ptr [[MEM]]
;
- %ret = tail call i8* @memset(i8* %mem, i32 %val, i32 %size)
- ret i8* %ret
+ %ret = tail call ptr @memset(ptr %mem, i32 %val, i32 %size)
+ ret ptr %ret
}
-define i8* @test_simplify1_musttail(i8* %mem, i32 %val, i32 %size) {
+define ptr @test_simplify1_musttail(ptr %mem, i32 %val, i32 %size) {
; CHECK-LABEL: @test_simplify1_musttail(
-; CHECK-NEXT: %ret = musttail call i8* @memset(i8* %mem, i32 %val, i32 %size)
-; CHECK-NEXT: ret i8* %ret
+; CHECK-NEXT: %ret = musttail call ptr @memset(ptr %mem, i32 %val, i32 %size)
+; CHECK-NEXT: ret ptr %ret
;
- %ret = musttail call i8* @memset(i8* %mem, i32 %val, i32 %size)
- ret i8* %ret
+ %ret = musttail call ptr @memset(ptr %mem, i32 %val, i32 %size)
+ ret ptr %ret
}
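; A musttail call must be immediately followed by a ret of its result and
; must keep the caller's signature, so it cannot be replaced with the
; void-returning intrinsic and is left untouched.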
; The malloc + memset pattern is now handled by DSE in a more general way.
-define i8* @pr25892_lite(i32 %size) #0 {
+define ptr @pr25892_lite(i32 %size) #0 {
; CHECK-LABEL: @pr25892_lite(
-; CHECK-NEXT: [[CALL:%.*]] = call i8* @malloc(i32 [[SIZE:%.*]])
-; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 1 [[CALL]], i8 0, i32 [[SIZE]], i1 false)
+; CHECK-NEXT: [[CALL:%.*]] = call ptr @malloc(i32 [[SIZE:%.*]])
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 1 [[CALL]], i8 0, i32 [[SIZE]], i1 false)
;
- %call1 = call i8* @malloc(i32 %size) #1
- %call2 = call i8* @memset(i8* %call1, i32 0, i32 %size) #1
- ret i8* %call2
+ %call1 = call ptr @malloc(i32 %size) #1
+ %call2 = call ptr @memset(ptr %call1, i32 0, i32 %size) #1
+ ret ptr %call2
}
; A memset intrinsic should be handled similarly to a memset() libcall.
; Notice that the malloc + memset pattern is now handled by DSE in a more general way.
-define i8* @malloc_and_memset_intrinsic(i32 %n) #0 {
+define ptr @malloc_and_memset_intrinsic(i32 %n) #0 {
; CHECK-LABEL: @malloc_and_memset_intrinsic(
-; CHECK-NEXT: [[CALL:%.*]] = call i8* @malloc(i32 [[N:%.*]])
-; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 1 [[CALL]], i8 0, i32 [[N]], i1 false)
-; CHECK-NEXT: ret i8* [[CALL]]
+; CHECK-NEXT: [[CALL:%.*]] = call ptr @malloc(i32 [[N:%.*]])
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 1 [[CALL]], i8 0, i32 [[N]], i1 false)
+; CHECK-NEXT: ret ptr [[CALL]]
;
- %call = call i8* @malloc(i32 %n)
- call void @llvm.memset.p0i8.i32(i8* %call, i8 0, i32 %n, i32 1, i1 false)
- ret i8* %call
+ %call = call ptr @malloc(i32 %n)
+ call void @llvm.memset.p0.i32(ptr %call, i8 0, i32 %n, i32 1, i1 false)
+ ret ptr %call
}
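; The five-operand llvm.memset form used above, with an explicit i32
; alignment operand, is the legacy signature; it is auto-upgraded on load
; to the four-operand form with an align attribute, which is what the
; CHECK lines match.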
; This should not create a calloc and should not crash the compiler.
; Notice that the malloc + memset pattern is now handled by DSE in a more general way.
-define i8* @notmalloc_memset(i32 %size, i8*(i32)* %notmalloc) {
+define ptr @notmalloc_memset(i32 %size, ptr %notmalloc) {
; CHECK-LABEL: @notmalloc_memset(
-; CHECK-NEXT: [[CALL1:%.*]] = call i8* [[NOTMALLOC:%.*]](i32 [[SIZE:%.*]]) [[ATTR0:#.*]]
-; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 1 [[CALL1]], i8 0, i32 [[SIZE]], i1 false) [[ATTR0]]
-; CHECK-NEXT: ret i8* [[CALL1]]
+; CHECK-NEXT: [[CALL1:%.*]] = call ptr [[NOTMALLOC:%.*]](i32 [[SIZE:%.*]]) [[ATTR0:#.*]]
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 1 [[CALL1]], i8 0, i32 [[SIZE]], i1 false) [[ATTR0]]
+; CHECK-NEXT: ret ptr [[CALL1]]
;
- %call1 = call i8* %notmalloc(i32 %size) #1
- %call2 = call i8* @memset(i8* %call1, i32 0, i32 %size) #1
- ret i8* %call2
+ %call1 = call ptr %notmalloc(i32 %size) #1
+ %call2 = call ptr @memset(ptr %call1, i32 0, i32 %size) #1
+ ret ptr %call2
}
; This doesn't fire currently because the malloc has more than one use.
; Notice that the malloc + memset pattern is now handled by DSE in a more general way.
-define float* @pr25892(i32 %size) #0 {
+define ptr @pr25892(i32 %size) #0 {
; CHECK-LABEL: @pr25892(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call i8* @malloc(i32 [[SIZE:%.*]]) [[ATTR0]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8* [[CALL]], null
+; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @malloc(i32 [[SIZE:%.*]]) [[ATTR0]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[CALL]], null
; CHECK-NEXT: br i1 [[CMP]], label [[CLEANUP:%.*]], label [[IF_END:%.*]]
; CHECK: if.end:
-; CHECK-NEXT: [[BC:%.*]] = bitcast i8* [[CALL]] to float*
-; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* nonnull align 1 [[CALL]], i8 0, i32 [[SIZE]], i1 false) [[ATTR0]]
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr nonnull align 1 [[CALL]], i8 0, i32 [[SIZE]], i1 false) [[ATTR0]]
; CHECK-NEXT: br label [[CLEANUP]]
; CHECK: cleanup:
-; CHECK-NEXT: [[RETVAL_0:%.*]] = phi float* [ [[BC]], [[IF_END]] ], [ null, [[ENTRY:%.*]] ]
-; CHECK-NEXT: ret float* [[RETVAL_0]]
+; CHECK-NEXT: [[RETVAL_0:%.*]] = phi ptr [ [[CALL]], [[IF_END]] ], [ null, [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret ptr [[RETVAL_0]]
;
entry:
- %call = tail call i8* @malloc(i32 %size) #1
- %cmp = icmp eq i8* %call, null
+ %call = tail call ptr @malloc(i32 %size) #1
+ %cmp = icmp eq ptr %call, null
br i1 %cmp, label %cleanup, label %if.end
if.end:
- %bc = bitcast i8* %call to float*
- %call2 = tail call i8* @memset(i8* nonnull %call, i32 0, i32 %size) #1
+ %call2 = tail call ptr @memset(ptr nonnull %call, i32 0, i32 %size) #1
br label %cleanup
cleanup:
- %retval.0 = phi float* [ %bc, %if.end ], [ null, %entry ]
- ret float* %retval.0
+ %retval.0 = phi ptr [ %call, %if.end ], [ null, %entry ]
+ ret ptr %retval.0
}
; If there's a calloc transform, the store must also be eliminated.
-define i8* @buffer_is_modified_then_memset(i32 %size) {
+define ptr @buffer_is_modified_then_memset(i32 %size) {
; CHECK-LABEL: @buffer_is_modified_then_memset(
-; CHECK-NEXT: [[PTR:%.*]] = tail call i8* @malloc(i32 [[SIZE:%.*]]) [[ATTR0]]
-; CHECK-NEXT: store i8 1, i8* [[PTR]], align 1
-; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* nonnull align 1 [[PTR]], i8 0, i32 [[SIZE]], i1 false) [[ATTR0]]
-; CHECK-NEXT: ret i8* [[PTR]]
+; CHECK-NEXT: [[PTR:%.*]] = tail call ptr @malloc(i32 [[SIZE:%.*]]) [[ATTR0]]
+; CHECK-NEXT: store i8 1, ptr [[PTR]], align 1
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr nonnull align 1 [[PTR]], i8 0, i32 [[SIZE]], i1 false) [[ATTR0]]
+; CHECK-NEXT: ret ptr [[PTR]]
;
- %ptr = tail call i8* @malloc(i32 %size) #1
- store i8 1, i8* %ptr ;; fdata[0] = 1;
- %memset = tail call i8* @memset(i8* nonnull %ptr, i32 0, i32 %size) #1
- ret i8* %memset
+ %ptr = tail call ptr @malloc(i32 %size) #1
+ store i8 1, ptr %ptr ;; fdata[0] = 1;
+ %memset = tail call ptr @memset(ptr nonnull %ptr, i32 0, i32 %size) #1
+ ret ptr %memset
}
-define i8* @memset_size_select(i1 %b, i8* %ptr) {
+define ptr @memset_size_select(i1 %b, ptr %ptr) {
; CHECK-LABEL: @memset_size_select(
; CHECK-NEXT: [[SIZE:%.*]] = select i1 [[B:%.*]], i32 10, i32 50
-; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* noundef nonnull align 1 dereferenceable(10) [[PTR:%.*]], i8 0, i32 [[SIZE]], i1 false) [[ATTR0]]
-; CHECK-NEXT: ret i8* [[PTR]]
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr noundef nonnull align 1 dereferenceable(10) [[PTR:%.*]], i8 0, i32 [[SIZE]], i1 false) [[ATTR0]]
+; CHECK-NEXT: ret ptr [[PTR]]
;
%size = select i1 %b, i32 10, i32 50
- %memset = tail call i8* @memset(i8* nonnull %ptr, i32 0, i32 %size) #1
- ret i8* %memset
+ %memset = tail call ptr @memset(ptr nonnull %ptr, i32 0, i32 %size) #1
+ ret ptr %memset
}
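; The select guarantees the size is at least 10 and never 0, so the
; destination gains noundef, nonnull, and dereferenceable(10).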
-define i8* @memset_size_select2(i1 %b, i8* %ptr) {
+define ptr @memset_size_select2(i1 %b, ptr %ptr) {
; CHECK-LABEL: @memset_size_select2(
; CHECK-NEXT: [[SIZE:%.*]] = select i1 [[B:%.*]], i32 10, i32 50
-; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* noundef nonnull align 1 dereferenceable(80) [[PTR:%.*]], i8 0, i32 [[SIZE]], i1 false) [[ATTR0]]
-; CHECK-NEXT: ret i8* [[PTR]]
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr noundef nonnull align 1 dereferenceable(80) [[PTR:%.*]], i8 0, i32 [[SIZE]], i1 false) [[ATTR0]]
+; CHECK-NEXT: ret ptr [[PTR]]
;
%size = select i1 %b, i32 10, i32 50
- %memset = tail call i8* @memset(i8* nonnull dereferenceable(80) %ptr, i32 0, i32 %size) #1
- ret i8* %memset
+ %memset = tail call ptr @memset(ptr nonnull dereferenceable(80) %ptr, i32 0, i32 %size) #1
+ ret ptr %memset
}
-define i8* @memset_size_select3(i1 %b, i8* %ptr) {
+define ptr @memset_size_select3(i1 %b, ptr %ptr) {
; CHECK-LABEL: @memset_size_select3(
; CHECK-NEXT: [[SIZE:%.*]] = select i1 [[B:%.*]], i32 10, i32 50
-; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* noundef nonnull align 1 dereferenceable(40) [[PTR:%.*]], i8 0, i32 [[SIZE]], i1 false)
-; CHECK-NEXT: ret i8* [[PTR]]
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr noundef nonnull align 1 dereferenceable(40) [[PTR:%.*]], i8 0, i32 [[SIZE]], i1 false)
+; CHECK-NEXT: ret ptr [[PTR]]
;
%size = select i1 %b, i32 10, i32 50
- %memset = tail call i8* @memset(i8* dereferenceable_or_null(40) %ptr, i32 0, i32 %size)
- ret i8* %memset
+ %memset = tail call ptr @memset(ptr dereferenceable_or_null(40) %ptr, i32 0, i32 %size)
+ ret ptr %memset
}
-define i8* @memset_size_select4(i1 %b, i8* %ptr) {
+define ptr @memset_size_select4(i1 %b, ptr %ptr) {
; CHECK-LABEL: @memset_size_select4(
; CHECK-NEXT: [[SIZE:%.*]] = select i1 [[B:%.*]], i32 10, i32 50
-; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* noundef nonnull align 1 dereferenceable(40) [[PTR:%.*]], i8 0, i32 [[SIZE]], i1 false) [[ATTR0]]
-; CHECK-NEXT: ret i8* [[PTR]]
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr noundef nonnull align 1 dereferenceable(40) [[PTR:%.*]], i8 0, i32 [[SIZE]], i1 false) [[ATTR0]]
+; CHECK-NEXT: ret ptr [[PTR]]
;
%size = select i1 %b, i32 10, i32 50
- %memset = tail call i8* @memset(i8* nonnull dereferenceable_or_null(40) %ptr, i32 0, i32 %size) #1
- ret i8* %memset
+ %memset = tail call ptr @memset(ptr nonnull dereferenceable_or_null(40) %ptr, i32 0, i32 %size) #1
+ ret ptr %memset
}
-define i8* @memset_size_ashr(i1 %b, i8* %ptr, i32 %v) {
+define ptr @memset_size_ashr(i1 %b, ptr %ptr, i32 %v) {
; CHECK-LABEL: @memset_size_ashr(
; CHECK-NEXT: [[SIZE:%.*]] = ashr i32 -2, [[V:%.*]]
-; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* noundef nonnull align 1 [[PTR:%.*]], i8 0, i32 [[SIZE]], i1 false) [[ATTR0]]
-; CHECK-NEXT: ret i8* [[PTR]]
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr noundef nonnull align 1 [[PTR:%.*]], i8 0, i32 [[SIZE]], i1 false) [[ATTR0]]
+; CHECK-NEXT: ret ptr [[PTR]]
;
%size = ashr i32 -2, %v
- %memset = tail call i8* @memset(i8* nonnull %ptr, i32 0, i32 %size) #1
- ret i8* %memset
+ %memset = tail call ptr @memset(ptr nonnull %ptr, i32 0, i32 %size) #1
+ ret ptr %memset
}
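; ashr i32 -2 by any amount yields -1 or -2, so the size is provably
; nonzero: the destination keeps noundef and nonnull, but no meaningful
; constant dereferenceable bound exists.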
-define i8* @memset_attrs1(i1 %b, i8* %ptr, i32 %size) {
+define ptr @memset_attrs1(i1 %b, ptr %ptr, i32 %size) {
; CHECK-LABEL: @memset_attrs1(
-; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 1 dereferenceable_or_null(40) [[PTR:%.*]], i8 0, i32 [[SIZE:%.*]], i1 false) [[ATTR0]]
-; CHECK-NEXT: ret i8* [[PTR]]
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 1 dereferenceable_or_null(40) [[PTR:%.*]], i8 0, i32 [[SIZE:%.*]], i1 false) [[ATTR0]]
+; CHECK-NEXT: ret ptr [[PTR]]
;
- %memset = tail call i8* @memset(i8* dereferenceable_or_null(40) %ptr, i32 0, i32 %size) #1
- ret i8* %memset
+ %memset = tail call ptr @memset(ptr dereferenceable_or_null(40) %ptr, i32 0, i32 %size) #1
+ ret ptr %memset
}
; be sure to drop nonnull since size is unknown and can be 0
; do not change dereferenceable attribute
-define i8* @memset_attrs2(i1 %b, i8* %ptr, i32 %size) {
+define ptr @memset_attrs2(i1 %b, ptr %ptr, i32 %size) {
; CHECK-LABEL: @memset_attrs2(
-; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* nonnull align 1 dereferenceable(40) [[PTR:%.*]], i8 0, i32 [[SIZE:%.*]], i1 false) [[ATTR0]]
-; CHECK-NEXT: ret i8* [[PTR]]
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr nonnull align 1 dereferenceable(40) [[PTR:%.*]], i8 0, i32 [[SIZE:%.*]], i1 false) [[ATTR0]]
+; CHECK-NEXT: ret ptr [[PTR]]
;
- %memset = tail call i8* @memset(i8* nonnull dereferenceable(40) %ptr, i32 0, i32 %size) #1
- ret i8* %memset
+ %memset = tail call ptr @memset(ptr nonnull dereferenceable(40) %ptr, i32 0, i32 %size) #1
+ ret ptr %memset
}
; size is unknown, so the attributes are copied over unchanged
-define i8* @memset_attrs3(i1 %b, i8* %ptr, i32 %size) {
+define ptr @memset_attrs3(i1 %b, ptr %ptr, i32 %size) {
; CHECK-LABEL: @memset_attrs3(
-; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* nonnull align 1 dereferenceable_or_null(40) [[PTR:%.*]], i8 0, i32 [[SIZE:%.*]], i1 false) [[ATTR0]]
-; CHECK-NEXT: ret i8* [[PTR]]
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr nonnull align 1 dereferenceable_or_null(40) [[PTR:%.*]], i8 0, i32 [[SIZE:%.*]], i1 false) [[ATTR0]]
+; CHECK-NEXT: ret ptr [[PTR]]
;
- %memset = tail call i8* @memset(i8* nonnull dereferenceable_or_null(40) %ptr, i32 0, i32 %size) #1
- ret i8* %memset
+ %memset = tail call ptr @memset(ptr nonnull dereferenceable_or_null(40) %ptr, i32 0, i32 %size) #1
+ ret ptr %memset
}
; be sure to drop nonnull since size is unknown and can be 0
-define i8* @memset_attrs4(i1 %b, i8* %ptr, i32 %size) {
+define ptr @memset_attrs4(i1 %b, ptr %ptr, i32 %size) {
; CHECK-LABEL: @memset_attrs4(
-; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* nonnull align 1 [[PTR:%.*]], i8 0, i32 [[SIZE:%.*]], i1 false) [[ATTR0]]
-; CHECK-NEXT: ret i8* [[PTR]]
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr nonnull align 1 [[PTR:%.*]], i8 0, i32 [[SIZE:%.*]], i1 false) [[ATTR0]]
+; CHECK-NEXT: ret ptr [[PTR]]
;
- %memset = tail call i8* @memset(i8* nonnull %ptr, i32 0, i32 %size) #1
- ret i8* %memset
+ %memset = tail call ptr @memset(ptr nonnull %ptr, i32 0, i32 %size) #1
+ ret ptr %memset
}
-define i8* @test_no_incompatible_attr(i8* %mem, i32 %val, i32 %size) {
+define ptr @test_no_incompatible_attr(ptr %mem, i32 %val, i32 %size) {
; CHECK-LABEL: @test_no_incompatible_attr(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[VAL:%.*]] to i8
-; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 1 [[MEM:%.*]], i8 [[TMP1]], i32 [[SIZE:%.*]], i1 false)
-; CHECK-NEXT: ret i8* [[MEM]]
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 1 [[MEM:%.*]], i8 [[TMP1]], i32 [[SIZE:%.*]], i1 false)
+; CHECK-NEXT: ret ptr [[MEM]]
;
- %ret = call dereferenceable(1) i8* @memset(i8* %mem, i32 %val, i32 %size)
- ret i8* %ret
+ %ret = call dereferenceable(1) ptr @memset(ptr %mem, i32 %val, i32 %size)
+ ret ptr %ret
}
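; The dereferenceable(1) return attribute has no counterpart on the void
; intrinsic call, so it is dropped during the transformation.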
attributes #0 = { nounwind ssp uwtable }
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-declare i8 @memset(i8*, i32, i32)
+declare i8 @memset(ptr, i32, i32)
; Check that memset functions with the wrong prototype aren't simplified.
-define i8 @test_no_simplify1(i8* %mem, i32 %val, i32 %size) {
+define i8 @test_no_simplify1(ptr %mem, i32 %val, i32 %size) {
; CHECK-LABEL: @test_no_simplify1(
- %ret = call i8 @memset(i8* %mem, i32 %val, i32 %size)
+ %ret = call i8 @memset(ptr %mem, i32 %val, i32 %size)
; CHECK: call i8 @memset
ret i8 %ret
; CHECK: ret i8 %ret
}
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-define i32 @test([1024 x i8]* %target) {
+define i32 @test(ptr %target) {
; CHECK-LABEL: @test(
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [1024 x i8], [1024 x i8]* [[TARGET:%.*]], i64 0, i64 0
-; CHECK-NEXT: store i8 1, i8* [[TMP1]], align 1
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast [1024 x i8]* [[TARGET]] to i16*
-; CHECK-NEXT: store i16 257, i16* [[TMP2]], align 2
-; CHECK-NEXT: [[TMP3:%.*]] = bitcast [1024 x i8]* [[TARGET]] to i32*
-; CHECK-NEXT: store i32 16843009, i32* [[TMP3]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast [1024 x i8]* [[TARGET]] to i64*
-; CHECK-NEXT: store i64 72340172838076673, i64* [[TMP4]], align 8
+; CHECK-NEXT: store i8 1, ptr [[TARGET:%.*]], align 1
+; CHECK-NEXT: store i16 257, ptr [[TARGET]], align 2
+; CHECK-NEXT: store i32 16843009, ptr [[TARGET]], align 4
+; CHECK-NEXT: store i64 72340172838076673, ptr [[TARGET]], align 8
; CHECK-NEXT: ret i32 0
;
- %target_p = getelementptr [1024 x i8], [1024 x i8]* %target, i32 0, i32 0
- call void @llvm.memset.p0i8.i32(i8* %target_p, i8 1, i32 0, i1 false)
- call void @llvm.memset.p0i8.i32(i8* %target_p, i8 1, i32 1, i1 false)
- call void @llvm.memset.p0i8.i32(i8* align 2 %target_p, i8 1, i32 2, i1 false)
- call void @llvm.memset.p0i8.i32(i8* align 4 %target_p, i8 1, i32 4, i1 false)
- call void @llvm.memset.p0i8.i32(i8* align 8 %target_p, i8 1, i32 8, i1 false)
+ call void @llvm.memset.p0.i32(ptr %target, i8 1, i32 0, i1 false)
+ call void @llvm.memset.p0.i32(ptr %target, i8 1, i32 1, i1 false)
+ call void @llvm.memset.p0.i32(ptr align 2 %target, i8 1, i32 2, i1 false)
+ call void @llvm.memset.p0.i32(ptr align 4 %target, i8 1, i32 4, i1 false)
+ call void @llvm.memset.p0.i32(ptr align 8 %target, i8 1, i32 8, i1 false)
ret i32 0
}
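; Constant-size memsets of 0/1/2/4/8 bytes become nothing or a single
; i8/i16/i32/i64 store of the splatted byte pattern:
; 257 = 0x0101, 16843009 = 0x01010101, 72340172838076673 = 0x0101010101010101.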
; CHECK-LABEL: @memset_to_constant(
; CHECK-NEXT: ret void
;
- %p = bitcast i128* @Unknown to i8*
- call void @llvm.memset.p0i8.i32(i8* %p, i8 0, i32 16, i1 false)
+ call void @llvm.memset.p0.i32(ptr @Unknown, i8 0, i32 16, i1 false)
ret void
}
; FIXME: This is technically incorrect because it might overwrite a poison
; value. Stop folding it once #52930 is resolved.
-define void @memset_undef(i8* %p) {
+define void @memset_undef(ptr %p) {
; CHECK-LABEL: @memset_undef(
; CHECK-NEXT: ret void
;
- call void @llvm.memset.p0i8.i32(i8* %p, i8 undef, i32 8, i1 false)
+ call void @llvm.memset.p0.i32(ptr %p, i8 undef, i32 8, i1 false)
ret void
}
-define void @memset_undef_volatile(i8* %p) {
+define void @memset_undef_volatile(ptr %p) {
; CHECK-LABEL: @memset_undef_volatile(
-; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* [[P:%.*]], i8 undef, i32 8, i1 true)
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr [[P:%.*]], i8 undef, i32 8, i1 true)
; CHECK-NEXT: ret void
;
- call void @llvm.memset.p0i8.i32(i8* %p, i8 undef, i32 8, i1 true)
+ call void @llvm.memset.p0.i32(ptr %p, i8 undef, i32 8, i1 true)
ret void
}
-define void @memset_poison(i8* %p) {
+define void @memset_poison(ptr %p) {
; CHECK-LABEL: @memset_poison(
; CHECK-NEXT: ret void
;
- call void @llvm.memset.p0i8.i32(i8* %p, i8 poison, i32 8, i1 false)
+ call void @llvm.memset.p0.i32(ptr %p, i8 poison, i32 8, i1 false)
ret void
}
-define void @memset_poison_volatile(i8* %p) {
+define void @memset_poison_volatile(ptr %p) {
; CHECK-LABEL: @memset_poison_volatile(
-; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* [[P:%.*]], i8 poison, i32 8, i1 true)
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr [[P:%.*]], i8 poison, i32 8, i1 true)
; CHECK-NEXT: ret void
;
- call void @llvm.memset.p0i8.i32(i8* %p, i8 poison, i32 8, i1 true)
+ call void @llvm.memset.p0.i32(ptr %p, i8 poison, i32 8, i1 true)
ret void
}
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1) argmemonly nounwind
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1) argmemonly nounwind
define void @test_no_simplify() {
; CHECK-LABEL: @test_no_simplify(
- %dst = bitcast %struct.T* @t to i8*
-; CHECK-NEXT: call i8* @__memset_chk
- call i8* @__memset_chk(i8* %dst, i32 0, i64 1824)
+; CHECK-NEXT: call ptr @__memset_chk
+ call ptr @__memset_chk(ptr @t, i32 0, i64 1824)
ret void
}
-declare i8* @__memset_chk(i8*, i32, i64)
+declare ptr @__memset_chk(ptr, i32, i64)
declare void @use.i8(i8)
declare void @use.i16(i16)
-define i1 @and_test1(i16* %x) {
+define i1 @and_test1(ptr %x) {
; CHECK-LABEL: @and_test1(
-; CHECK-NEXT: [[LOAD:%.*]] = load i16, i16* [[X:%.*]], align 4
+; CHECK-NEXT: [[LOAD:%.*]] = load i16, ptr [[X:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i16 [[LOAD]], 17791
; CHECK-NEXT: ret i1 [[TMP1]]
;
- %load = load i16, i16* %x, align 4
+ %load = load i16, ptr %x, align 4
%trunc = trunc i16 %load to i8
%cmp1 = icmp eq i8 %trunc, 127
%and = and i16 %load, -256
ret i1 %or
}
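; The two comparisons above test disjoint bit ranges of the same load (the
; truncated low byte and the masked high bits), so they merge into a single
; i16 equality against the combined constant, e.g. 17791 = 0x457f.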
-define i1 @and_test1_logical(i16* %x) {
+define i1 @and_test1_logical(ptr %x) {
; CHECK-LABEL: @and_test1_logical(
-; CHECK-NEXT: [[LOAD:%.*]] = load i16, i16* [[X:%.*]], align 4
+; CHECK-NEXT: [[LOAD:%.*]] = load i16, ptr [[X:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i16 [[LOAD]], 17791
; CHECK-NEXT: ret i1 [[TMP1]]
;
- %load = load i16, i16* %x, align 4
+ %load = load i16, ptr %x, align 4
%trunc = trunc i16 %load to i8
%cmp1 = icmp eq i8 %trunc, 127
%and = and i16 %load, -256
ret i1 %or
}
-define <2 x i1> @and_test1_vector(<2 x i16>* %x) {
+define <2 x i1> @and_test1_vector(ptr %x) {
; CHECK-LABEL: @and_test1_vector(
-; CHECK-NEXT: [[LOAD:%.*]] = load <2 x i16>, <2 x i16>* [[X:%.*]], align 4
+; CHECK-NEXT: [[LOAD:%.*]] = load <2 x i16>, ptr [[X:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i16> [[LOAD]], <i16 17791, i16 17791>
; CHECK-NEXT: ret <2 x i1> [[TMP1]]
;
- %load = load <2 x i16>, <2 x i16>* %x, align 4
+ %load = load <2 x i16>, ptr %x, align 4
%trunc = trunc <2 x i16> %load to <2 x i8>
%cmp1 = icmp eq <2 x i8> %trunc, <i8 127, i8 127>
%and = and <2 x i16> %load, <i16 -256, i16 -256>
ret <2 x i1> %or
}
-define i1 @and_test2(i16* %x) {
+define i1 @and_test2(ptr %x) {
; CHECK-LABEL: @and_test2(
-; CHECK-NEXT: [[LOAD:%.*]] = load i16, i16* [[X:%.*]], align 4
+; CHECK-NEXT: [[LOAD:%.*]] = load i16, ptr [[X:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i16 [[LOAD]], 32581
; CHECK-NEXT: ret i1 [[TMP1]]
;
- %load = load i16, i16* %x, align 4
+ %load = load i16, ptr %x, align 4
%and = and i16 %load, -256
%cmp1 = icmp eq i16 %and, 32512
%trunc = trunc i16 %load to i8
ret i1 %or
}
-define i1 @and_test2_logical(i16* %x) {
+define i1 @and_test2_logical(ptr %x) {
; CHECK-LABEL: @and_test2_logical(
-; CHECK-NEXT: [[LOAD:%.*]] = load i16, i16* [[X:%.*]], align 4
+; CHECK-NEXT: [[LOAD:%.*]] = load i16, ptr [[X:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i16 [[LOAD]], 32581
; CHECK-NEXT: ret i1 [[TMP1]]
;
- %load = load i16, i16* %x, align 4
+ %load = load i16, ptr %x, align 4
%and = and i16 %load, -256
%cmp1 = icmp eq i16 %and, 32512
%trunc = trunc i16 %load to i8
ret i1 %or
}
-define <2 x i1> @and_test2_vector(<2 x i16>* %x) {
+define <2 x i1> @and_test2_vector(ptr %x) {
; CHECK-LABEL: @and_test2_vector(
-; CHECK-NEXT: [[LOAD:%.*]] = load <2 x i16>, <2 x i16>* [[X:%.*]], align 4
+; CHECK-NEXT: [[LOAD:%.*]] = load <2 x i16>, ptr [[X:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i16> [[LOAD]], <i16 32581, i16 32581>
; CHECK-NEXT: ret <2 x i1> [[TMP1]]
;
- %load = load <2 x i16>, <2 x i16>* %x, align 4
+ %load = load <2 x i16>, ptr %x, align 4
%and = and <2 x i16> %load, <i16 -256, i16 -256>
%cmp1 = icmp eq <2 x i16> %and, <i16 32512, i16 32512>
%trunc = trunc <2 x i16> %load to <2 x i8>
define void @_Z4testv() {
; CHECK-LABEL: @_Z4testv(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[I:%.*]] = load i8, i8* @var_7, align 1
+; CHECK-NEXT: [[I:%.*]] = load i8, ptr @var_7, align 1
; CHECK-NEXT: [[I1:%.*]] = icmp eq i8 [[I]], -1
-; CHECK-NEXT: [[I4:%.*]] = load i16, i16* @var_0, align 2
+; CHECK-NEXT: [[I4:%.*]] = load i16, ptr @var_0, align 2
; CHECK-NEXT: br i1 [[I1]], label [[BB10:%.*]], label [[BB9:%.*]]
; CHECK: bb9:
; CHECK-NEXT: br label [[BB12:%.*]]
; CHECK: bb10:
-; CHECK-NEXT: [[I2:%.*]] = load i32, i32* @var_1, align 4
+; CHECK-NEXT: [[I2:%.*]] = load i32, ptr @var_1, align 4
; CHECK-NEXT: [[I3:%.*]] = icmp eq i32 [[I2]], 0
-; CHECK-NEXT: [[I6:%.*]] = load i64, i64* @var_5, align 8
+; CHECK-NEXT: [[I6:%.*]] = load i64, ptr @var_5, align 8
; CHECK-NEXT: [[I5:%.*]] = sext i16 [[I4]] to i64
; CHECK-NEXT: [[I7:%.*]] = select i1 [[I3]], i64 [[I6]], i64 [[I5]]
; CHECK-NEXT: [[I11:%.*]] = trunc i64 [[I7]] to i32
; CHECK-NEXT: br label [[BB12]]
; CHECK: bb12:
; CHECK-NEXT: [[STOREMERGE1:%.*]] = phi i32 [ [[I11]], [[BB10]] ], [ 1, [[BB9]] ]
-; CHECK-NEXT: store i32 [[STOREMERGE1]], i32* getelementptr inbounds ([0 x i32], [0 x i32]* @arr_2, i64 0, i64 0), align 4
-; CHECK-NEXT: store i16 [[I4]], i16* getelementptr inbounds ([0 x i16], [0 x i16]* @arr_4, i64 0, i64 0), align 2
+; CHECK-NEXT: store i32 [[STOREMERGE1]], ptr @arr_2, align 4
+; CHECK-NEXT: store i16 [[I4]], ptr @arr_4, align 2
; CHECK-NEXT: [[I8:%.*]] = sext i16 [[I4]] to i32
-; CHECK-NEXT: store i32 [[I8]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @arr_3, i64 0, i64 0), align 16
-; CHECK-NEXT: store i32 [[STOREMERGE1]], i32* getelementptr inbounds ([0 x i32], [0 x i32]* @arr_2, i64 0, i64 1), align 4
-; CHECK-NEXT: store i16 [[I4]], i16* getelementptr inbounds ([0 x i16], [0 x i16]* @arr_4, i64 0, i64 1), align 2
-; CHECK-NEXT: store i32 [[I8]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @arr_3, i64 0, i64 1), align 4
+; CHECK-NEXT: store i32 [[I8]], ptr @arr_3, align 16
+; CHECK-NEXT: store i32 [[STOREMERGE1]], ptr getelementptr inbounds ([0 x i32], ptr @arr_2, i64 0, i64 1), align 4
+; CHECK-NEXT: store i16 [[I4]], ptr getelementptr inbounds ([0 x i16], ptr @arr_4, i64 0, i64 1), align 2
+; CHECK-NEXT: store i32 [[I8]], ptr getelementptr inbounds ([8 x i32], ptr @arr_3, i64 0, i64 1), align 4
; CHECK-NEXT: ret void
;
bb:
- %i = load i8, i8* @var_7, align 1
+ %i = load i8, ptr @var_7, align 1
%i1 = icmp eq i8 %i, -1
- %i2 = load i32, i32* @var_1, align 4
+ %i2 = load i32, ptr @var_1, align 4
%i3 = icmp eq i32 %i2, 0
- %i4 = load i16, i16* @var_0, align 2
+ %i4 = load i16, ptr @var_0, align 2
%i5 = sext i16 %i4 to i64
- %i6 = load i64, i64* @var_5, align 8
+ %i6 = load i64, ptr @var_5, align 8
%i7 = select i1 %i3, i64 %i6, i64 %i5
%i8 = sext i16 %i4 to i32
br i1 %i1, label %bb10, label %bb9
bb9: ; preds = %bb
- store i32 1, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @arr_2, i64 0, i64 0), align 4
- store i16 %i4, i16* getelementptr inbounds ([0 x i16], [0 x i16]* @arr_4, i64 0, i64 0), align 2
- store i32 %i8, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @arr_3, i64 0, i64 0), align 4
- store i32 1, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @arr_2, i64 0, i64 1), align 4
- store i16 %i4, i16* getelementptr inbounds ([0 x i16], [0 x i16]* @arr_4, i64 0, i64 1), align 2
- store i32 %i8, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @arr_3, i64 0, i64 1), align 4
+ store i32 1, ptr @arr_2, align 4
+ store i16 %i4, ptr @arr_4, align 2
+ store i32 %i8, ptr @arr_3, align 4
+ store i32 1, ptr getelementptr inbounds ([0 x i32], ptr @arr_2, i64 0, i64 1), align 4
+ store i16 %i4, ptr getelementptr inbounds ([0 x i16], ptr @arr_4, i64 0, i64 1), align 2
+ store i32 %i8, ptr getelementptr inbounds ([8 x i32], ptr @arr_3, i64 0, i64 1), align 4
br label %bb12
bb10: ; preds = %bb
%i11 = trunc i64 %i7 to i32
- store i32 %i11, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @arr_2, i64 0, i64 0), align 4
- store i16 %i4, i16* getelementptr inbounds ([0 x i16], [0 x i16]* @arr_4, i64 0, i64 0), align 2
- store i32 %i8, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @arr_3, i64 0, i64 0), align 4
- store i32 %i11, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @arr_2, i64 0, i64 1), align 4
- store i16 %i4, i16* getelementptr inbounds ([0 x i16], [0 x i16]* @arr_4, i64 0, i64 1), align 2
- store i32 %i8, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @arr_3, i64 0, i64 1), align 4
+ store i32 %i11, ptr @arr_2, align 4
+ store i16 %i4, ptr @arr_4, align 2
+ store i32 %i8, ptr @arr_3, align 4
+ store i32 %i11, ptr getelementptr inbounds ([0 x i32], ptr @arr_2, i64 0, i64 1), align 4
+ store i16 %i4, ptr getelementptr inbounds ([0 x i16], ptr @arr_4, i64 0, i64 1), align 2
+ store i32 %i8, ptr getelementptr inbounds ([8 x i32], ptr @arr_3, i64 0, i64 1), align 4
br label %bb12
bb12: ; preds = %bb10, %bb9
; CHECK-NEXT: [[TEST:%.*]] = icmp sgt i32 [[OTHER:%.*]], 0
; CHECK-NEXT: ret i1 [[TEST]]
;
- %positive = load i32, i32* @g, !range !{i32 1, i32 2048}
+ %positive = load i32, ptr @g, !range !{i32 1, i32 2048}
%cmp = icmp slt i32 %positive, %other
%sel = select i1 %cmp, i32 %positive, i32 %other
%test = icmp sgt i32 %sel, 0
; CHECK-NEXT: [[TEST:%.*]] = icmp sgt i32 [[OTHER:%.*]], 0
; CHECK-NEXT: ret i1 [[TEST]]
;
- %positive = load i32, i32* @g, !range !{i32 1, i32 2048}
+ %positive = load i32, ptr @g, !range !{i32 1, i32 2048}
%smin = call i32 @llvm.smin.i32(i32 %positive, i32 %other)
%test = icmp sgt i32 %smin, 0
ret i1 %test
; CHECK-NEXT: [[TEST:%.*]] = icmp sgt i32 [[OTHER:%.*]], 0
; CHECK-NEXT: ret i1 [[TEST]]
;
- %positive = load i32, i32* @g, !range !{i32 1, i32 2048}
+ %positive = load i32, ptr @g, !range !{i32 1, i32 2048}
%cmp = icmp slt i32 %other, %positive
%sel = select i1 %cmp, i32 %other, i32 %positive
%test = icmp sgt i32 %sel, 0
define i1 @maybe_not_positive(i32 %other) {
; CHECK-LABEL: @maybe_not_positive(
-; CHECK-NEXT: [[POSITIVE:%.*]] = load i32, i32* @g, align 4, !range [[RNG0:![0-9]+]]
+; CHECK-NEXT: [[POSITIVE:%.*]] = load i32, ptr @g, align 4, !range [[RNG0:![0-9]+]]
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.smin.i32(i32 [[POSITIVE]], i32 [[OTHER:%.*]])
; CHECK-NEXT: [[TEST:%.*]] = icmp sgt i32 [[TMP1]], 0
; CHECK-NEXT: ret i1 [[TEST]]
;
- %positive = load i32, i32* @g, !range !{i32 0, i32 2048}
+ %positive = load i32, ptr @g, !range !{i32 0, i32 2048}
%cmp = icmp slt i32 %positive, %other
%sel = select i1 %cmp, i32 %positive, i32 %other
%test = icmp sgt i32 %sel, 0
; Negative test
-define i32 @add_umin_extra_use(i32 %x, i32* %p) {
+define i32 @add_umin_extra_use(i32 %x, ptr %p) {
; CHECK-LABEL: @add_umin_extra_use(
; CHECK-NEXT: [[A:%.*]] = add nuw i32 [[X:%.*]], 15
-; CHECK-NEXT: store i32 [[A]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[A]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.umin.i32(i32 [[A]], i32 42)
; CHECK-NEXT: ret i32 [[TMP1]]
;
%a = add nuw i32 %x, 15
- store i32 %a, i32* %p
+ store i32 %a, ptr %p
%c = icmp ult i32 %a, 42
%r = select i1 %c, i32 %a, i32 42
ret i32 %r
; Negative test
-define i32 @add_umax_extra_use(i32 %x, i32* %p) {
+define i32 @add_umax_extra_use(i32 %x, ptr %p) {
; CHECK-LABEL: @add_umax_extra_use(
; CHECK-NEXT: [[A:%.*]] = add nuw i32 [[X:%.*]], 15
-; CHECK-NEXT: store i32 [[A]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[A]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.umax.i32(i32 [[A]], i32 42)
; CHECK-NEXT: ret i32 [[TMP1]]
;
%a = add nuw i32 %x, 15
- store i32 %a, i32* %p
+ store i32 %a, ptr %p
%c = icmp ugt i32 %a, 42
%r = select i1 %c, i32 %a, i32 42
ret i32 %r
; Negative test
-define i32 @add_smin_extra_use(i32 %x, i32* %p) {
+define i32 @add_smin_extra_use(i32 %x, ptr %p) {
; CHECK-LABEL: @add_smin_extra_use(
; CHECK-NEXT: [[A:%.*]] = add nsw i32 [[X:%.*]], 15
-; CHECK-NEXT: store i32 [[A]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[A]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.smin.i32(i32 [[A]], i32 42)
; CHECK-NEXT: ret i32 [[TMP1]]
;
%a = add nsw i32 %x, 15
- store i32 %a, i32* %p
+ store i32 %a, ptr %p
%c = icmp slt i32 %a, 42
%r = select i1 %c, i32 %a, i32 42
ret i32 %r
; Negative test
-define i32 @add_smax_extra_use(i32 %x, i32* %p) {
+define i32 @add_smax_extra_use(i32 %x, ptr %p) {
; CHECK-LABEL: @add_smax_extra_use(
; CHECK-NEXT: [[A:%.*]] = add nsw i32 [[X:%.*]], 15
-; CHECK-NEXT: store i32 [[A]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[A]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.smax.i32(i32 [[A]], i32 42)
; CHECK-NEXT: ret i32 [[TMP1]]
;
%a = add nsw i32 %x, 15
- store i32 %a, i32* %p
+ store i32 %a, ptr %p
%c = icmp sgt i32 %a, 42
%r = select i1 %c, i32 %a, i32 42
ret i32 %r
define i8 @umin_of_not_and_nontrivial_const(i8 %x) {
; CHECK-LABEL: @umin_of_not_and_nontrivial_const(
; CHECK-NEXT: [[NOTX:%.*]] = xor i8 [[X:%.*]], -1
-; CHECK-NEXT: [[M:%.*]] = call i8 @llvm.umin.i8(i8 [[NOTX]], i8 ptrtoint (i8 (i8)* @umin_of_not_and_nontrivial_const to i8))
+; CHECK-NEXT: [[M:%.*]] = call i8 @llvm.umin.i8(i8 [[NOTX]], i8 ptrtoint (ptr @umin_of_not_and_nontrivial_const to i8))
; CHECK-NEXT: ret i8 [[M]]
;
%notx = xor i8 %x, -1
- %m = call i8 @llvm.umin.i8(i8 ptrtoint (i8(i8)* @umin_of_not_and_nontrivial_const to i8), i8 %notx)
+ %m = call i8 @llvm.umin.i8(i8 ptrtoint (ptr @umin_of_not_and_nontrivial_const to i8), i8 %notx)
ret i8 %m
}
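; Nothing folds here beyond canonicalization: the ptrtoint constant
; expression is moved to the second operand of the umin.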
define i8 @umax_umax_reassoc_constantexpr_sink(i8 %x, i8 %y) {
; CHECK-LABEL: @umax_umax_reassoc_constantexpr_sink(
-; CHECK-NEXT: [[M1:%.*]] = call i8 @llvm.umax.i8(i8 [[X:%.*]], i8 ptrtoint (i8 (i8, i8)* @umax_umax_reassoc_constantexpr_sink to i8))
+; CHECK-NEXT: [[M1:%.*]] = call i8 @llvm.umax.i8(i8 [[X:%.*]], i8 ptrtoint (ptr @umax_umax_reassoc_constantexpr_sink to i8))
; CHECK-NEXT: [[M2:%.*]] = call i8 @llvm.umax.i8(i8 [[M1]], i8 42)
; CHECK-NEXT: ret i8 [[M2]]
;
%m1 = call i8 @llvm.umax.i8(i8 %x, i8 42)
- %m2 = call i8 @llvm.umax.i8(i8 %m1, i8 ptrtoint (i8 (i8, i8)* @umax_umax_reassoc_constantexpr_sink to i8))
+ %m2 = call i8 @llvm.umax.i8(i8 %m1, i8 ptrtoint (ptr @umax_umax_reassoc_constantexpr_sink to i8))
ret i8 %m2
}
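; Reassociation pairs %x with the opaque ptrtoint expression and sinks the
; simple immediate 42 into the outer call, the canonical spot where it can
; combine with other constants.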
define i1 @PR57986() {
; CHECK-LABEL: @PR57986(
-; CHECK-NEXT: ret i1 ptrtoint (i32* @g to i1)
+; CHECK-NEXT: ret i1 ptrtoint (ptr @g to i1)
;
- %umin = call i1 @llvm.umin.i1(i1 ptrtoint (i32* @g to i1), i1 true)
+ %umin = call i1 @llvm.umin.i1(i1 ptrtoint (ptr @g to i1), i1 true)
ret i1 %umin
}
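; On i1, umin is logical AND, and AND with true is the identity, so the
; call folds to the i1 ptrtoint constant expression itself.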
; CHECK-LABEL: @reduce_precision_multi_use_0(
; CHECK-NEXT: [[X_EXT:%.*]] = fpext float [[X:%.*]] to double
; CHECK-NEXT: [[Y_EXT:%.*]] = fpext float [[Y:%.*]] to double
-; CHECK-NEXT: store double [[X_EXT]], double* undef, align 8
+; CHECK-NEXT: store double [[X_EXT]], ptr undef, align 8
; CHECK-NEXT: [[MINNUM:%.*]] = call double @llvm.minnum.f64(double [[X_EXT]], double [[Y_EXT]])
; CHECK-NEXT: [[TRUNC:%.*]] = fptrunc double [[MINNUM]] to float
; CHECK-NEXT: ret float [[TRUNC]]
;
%x.ext = fpext float %x to double
%y.ext = fpext float %y to double
- store double %x.ext, double* undef
+ store double %x.ext, ptr undef
%minnum = call double @llvm.minnum.f64(double %x.ext, double %y.ext)
%trunc = fptrunc double %minnum to float
ret float %trunc
; CHECK-LABEL: @reduce_precision_multi_use_1(
; CHECK-NEXT: [[X_EXT:%.*]] = fpext float [[X:%.*]] to double
; CHECK-NEXT: [[Y_EXT:%.*]] = fpext float [[Y:%.*]] to double
-; CHECK-NEXT: store double [[Y_EXT]], double* undef, align 8
+; CHECK-NEXT: store double [[Y_EXT]], ptr undef, align 8
; CHECK-NEXT: [[MINNUM:%.*]] = call double @llvm.minnum.f64(double [[X_EXT]], double [[Y_EXT]])
; CHECK-NEXT: [[TRUNC:%.*]] = fptrunc double [[MINNUM]] to float
; CHECK-NEXT: ret float [[TRUNC]]
;
%x.ext = fpext float %x to double
%y.ext = fpext float %y to double
- store double %y.ext, double* undef
+ store double %y.ext, ptr undef
%minnum = call double @llvm.minnum.f64(double %x.ext, double %y.ext)
%trunc = fptrunc double %minnum to float
ret float %trunc
@PR22087 = external global i32
define i32 @test31(i32 %V) {
; CHECK-LABEL: @test31(
-; CHECK-NEXT: [[MUL:%.*]] = shl i32 [[V:%.*]], zext (i1 icmp ne (i32* inttoptr (i64 1 to i32*), i32* @PR22087) to i32)
+; CHECK-NEXT: [[MUL:%.*]] = shl i32 [[V:%.*]], zext (i1 icmp ne (ptr inttoptr (i64 1 to ptr), ptr @PR22087) to i32)
; CHECK-NEXT: ret i32 [[MUL]]
;
- %mul = mul i32 %V, shl (i32 1, i32 zext (i1 icmp ne (i32* inttoptr (i64 1 to i32*), i32* @PR22087) to i32))
+ %mul = mul i32 %V, shl (i32 1, i32 zext (i1 icmp ne (ptr inttoptr (i64 1 to ptr), ptr @PR22087) to i32))
ret i32 %mul
}
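; The zext of the i1 compare is 0 or 1, so shl (i32 1, ...) is 1 or 2, a
; power of two; the multiply therefore canonicalizes to shifting %V left by
; the zext amount.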
; Check we do not undo the canonicalization of 0 - (X * Y), if Y is a constant
; expr.
; CHECK-LABEL: @test_mul_canonicalize_neg_is_not_undone(
-; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[L1:%.*]], ptrtoint (i32* @X to i64)
+; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[L1:%.*]], ptrtoint (ptr @X to i64)
; CHECK-NEXT: [[B4:%.*]] = sub i64 0, [[TMP1]]
; CHECK-NEXT: ret i64 [[B4]]
;
- %v1 = ptrtoint i32* @X to i64
+ %v1 = ptrtoint ptr @X to i64
%B8 = sub i64 0, %v1
%B4 = mul i64 %B8, %L1
ret i64 %B4
define i32 @PR20079(i32 %a) {
; CHECK-LABEL: @PR20079(
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[A:%.*]], -1
-; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[ADD]], ptrtoint (i32* @g to i32)
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[ADD]], ptrtoint (ptr @g to i32)
; CHECK-NEXT: ret i32 [[MUL]]
;
%add = add i32 %a, -1
- %mul = mul nsw i32 %add, ptrtoint (i32* @g to i32)
+ %mul = mul nsw i32 %add, ptrtoint (ptr @g to i32)
ret i32 %mul
}
@PR22087 = external global i32
define i32 @test31(i32 %V) {
; CHECK-LABEL: @test31(
-; CHECK-NEXT: [[MUL:%.*]] = shl i32 [[V:%.*]], zext (i1 icmp ne (i32* inttoptr (i64 1 to i32*), i32* @PR22087) to i32)
+; CHECK-NEXT: [[MUL:%.*]] = shl i32 [[V:%.*]], zext (i1 icmp ne (ptr inttoptr (i64 1 to ptr), ptr @PR22087) to i32)
; CHECK-NEXT: ret i32 [[MUL]]
;
- %mul = mul i32 %V, shl (i32 1, i32 zext (i1 icmp ne (i32* inttoptr (i64 1 to i32*), i32* @PR22087) to i32))
+ %mul = mul i32 %V, shl (i32 1, i32 zext (i1 icmp ne (ptr inttoptr (i64 1 to ptr), ptr @PR22087) to i32))
ret i32 %mul
}
; Check we do not undo the canonicalization of 0 - (X * Y), if Y is a constant
; expr.
; CHECK-LABEL: @test_mul_canonicalize_neg_is_not_undone(
-; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[L1:%.*]], ptrtoint (i32* @X to i64)
+; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[L1:%.*]], ptrtoint (ptr @X to i64)
; CHECK-NEXT: [[B4:%.*]] = sub i64 0, [[TMP1]]
; CHECK-NEXT: ret i64 [[B4]]
;
- %v1 = ptrtoint i32* @X to i64
+ %v1 = ptrtoint ptr @X to i64
%B8 = sub i64 0, %v1
%B4 = mul i64 %B8, %L1
ret i64 %B4
target datalayout = "e-p:32:32:32-p1:64:64:64-p2:8:8:8-p3:16:16:16-p4:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32"
-define i32 @test_as0(i32 addrspace(0)* %a) {
+define i32 @test_as0(ptr addrspace(0) %a) {
; CHECK-LABEL: @test_as0(
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, i32* [[A:%.*]], i32 1
-; CHECK-NEXT: [[Y:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, ptr [[A:%.*]], i32 1
+; CHECK-NEXT: [[Y:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: ret i32 [[Y]]
;
- %arrayidx = getelementptr i32, i32 addrspace(0)* %a, i64 1
- %y = load i32, i32 addrspace(0)* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr addrspace(0) %a, i64 1
+ %y = load i32, ptr addrspace(0) %arrayidx, align 4
ret i32 %y
}
-define i32 @test_as1(i32 addrspace(1)* %a) {
+define i32 @test_as1(ptr addrspace(1) %a) {
; CHECK-LABEL: @test_as1(
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, i32 addrspace(1)* [[A:%.*]], i64 1
-; CHECK-NEXT: [[Y:%.*]] = load i32, i32 addrspace(1)* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, ptr addrspace(1) [[A:%.*]], i64 1
+; CHECK-NEXT: [[Y:%.*]] = load i32, ptr addrspace(1) [[ARRAYIDX]], align 4
; CHECK-NEXT: ret i32 [[Y]]
;
- %arrayidx = getelementptr i32, i32 addrspace(1)* %a, i32 1
- %y = load i32, i32 addrspace(1)* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr addrspace(1) %a, i32 1
+ %y = load i32, ptr addrspace(1) %arrayidx, align 4
ret i32 %y
}
-define i32 @test_as2(i32 addrspace(2)* %a) {
+define i32 @test_as2(ptr addrspace(2) %a) {
; CHECK-LABEL: @test_as2(
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, i32 addrspace(2)* [[A:%.*]], i8 1
-; CHECK-NEXT: [[Y:%.*]] = load i32, i32 addrspace(2)* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, ptr addrspace(2) [[A:%.*]], i8 1
+; CHECK-NEXT: [[Y:%.*]] = load i32, ptr addrspace(2) [[ARRAYIDX]], align 4
; CHECK-NEXT: ret i32 [[Y]]
;
- %arrayidx = getelementptr i32, i32 addrspace(2)* %a, i32 1
- %y = load i32, i32 addrspace(2)* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr addrspace(2) %a, i32 1
+ %y = load i32, ptr addrspace(2) %arrayidx, align 4
ret i32 %y
}
-define i32 @test_as3(i32 addrspace(3)* %a) {
+define i32 @test_as3(ptr addrspace(3) %a) {
; CHECK-LABEL: @test_as3(
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, i32 addrspace(3)* [[A:%.*]], i16 1
-; CHECK-NEXT: [[Y:%.*]] = load i32, i32 addrspace(3)* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, ptr addrspace(3) [[A:%.*]], i16 1
+; CHECK-NEXT: [[Y:%.*]] = load i32, ptr addrspace(3) [[ARRAYIDX]], align 4
; CHECK-NEXT: ret i32 [[Y]]
;
- %arrayidx = getelementptr i32, i32 addrspace(3)* %a, i32 1
- %y = load i32, i32 addrspace(3)* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr addrspace(3) %a, i32 1
+ %y = load i32, ptr addrspace(3) %arrayidx, align 4
ret i32 %y
}
-define i32 @test_combine_ptrtoint(i32 addrspace(2)* %a) {
+define i32 @test_combine_ptrtoint(ptr addrspace(2) %a) {
; CHECK-LABEL: @test_combine_ptrtoint(
-; CHECK-NEXT: [[Y:%.*]] = load i32, i32 addrspace(2)* [[A:%.*]], align 4
+; CHECK-NEXT: [[Y:%.*]] = load i32, ptr addrspace(2) [[A:%.*]], align 4
; CHECK-NEXT: ret i32 [[Y]]
;
- %cast = ptrtoint i32 addrspace(2)* %a to i8
- %castback = inttoptr i8 %cast to i32 addrspace(2)*
- %y = load i32, i32 addrspace(2)* %castback, align 4
+ %cast = ptrtoint ptr addrspace(2) %a to i8
+ %castback = inttoptr i8 %cast to ptr addrspace(2)
+ %y = load i32, ptr addrspace(2) %castback, align 4
ret i32 %y
}
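; The round trip folds because the datalayout above gives addrspace(2)
; pointers a width of only 8 bits ("p2:8:8:8"), so no bits are lost going
; through i8. A minimal sketch of the rule as we understand it:
;   %i = ptrtoint ptr addrspace(2) %p to i8   ; i8 matches the AS2 width
;   %q = inttoptr i8 %i to ptr addrspace(2)   ; folds to %p
; If the integer type were narrower than the pointer, the pair would be a
; truncating round trip and could not be removed this way.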
; CHECK-LABEL: @test_combine_inttoptr(
; CHECK-NEXT: ret i8 [[A:%.*]]
;
- %cast = inttoptr i8 %a to i32 addrspace(2)*
- %castback = ptrtoint i32 addrspace(2)* %cast to i8
+ %cast = inttoptr i8 %a to ptr addrspace(2)
+ %castback = ptrtoint ptr addrspace(2) %cast to i8
ret i8 %castback
}
-define i32 @test_combine_vector_ptrtoint(<2 x i32 addrspace(2)*> %a) {
+define i32 @test_combine_vector_ptrtoint(<2 x ptr addrspace(2)> %a) {
; CHECK-LABEL: @test_combine_vector_ptrtoint(
-; CHECK-NEXT: [[P:%.*]] = extractelement <2 x i32 addrspace(2)*> [[A:%.*]], i64 0
-; CHECK-NEXT: [[Y:%.*]] = load i32, i32 addrspace(2)* [[P]], align 4
+; CHECK-NEXT: [[P:%.*]] = extractelement <2 x ptr addrspace(2)> [[A:%.*]], i64 0
+; CHECK-NEXT: [[Y:%.*]] = load i32, ptr addrspace(2) [[P]], align 4
; CHECK-NEXT: ret i32 [[Y]]
;
- %cast = ptrtoint <2 x i32 addrspace(2)*> %a to <2 x i8>
- %castback = inttoptr <2 x i8> %cast to <2 x i32 addrspace(2)*>
- %p = extractelement <2 x i32 addrspace(2)*> %castback, i32 0
- %y = load i32, i32 addrspace(2)* %p, align 4
+ %cast = ptrtoint <2 x ptr addrspace(2)> %a to <2 x i8>
+ %castback = inttoptr <2 x i8> %cast to <2 x ptr addrspace(2)>
+ %p = extractelement <2 x ptr addrspace(2)> %castback, i32 0
+ %y = load i32, ptr addrspace(2) %p, align 4
ret i32 %y
}
; CHECK-LABEL: @test_combine_vector_inttoptr(
; CHECK-NEXT: ret <2 x i8> [[A:%.*]]
;
- %cast = inttoptr <2 x i8> %a to <2 x i32 addrspace(2)*>
- %castback = ptrtoint <2 x i32 addrspace(2)*> %cast to <2 x i8>
+ %cast = inttoptr <2 x i8> %a to <2 x ptr addrspace(2)>
+ %castback = ptrtoint <2 x ptr addrspace(2)> %cast to <2 x i8>
ret <2 x i8> %castback
}
; Check that the GEP index is changed to the address space integer type (i64 -> i8)
-define i32 addrspace(2)* @shrink_gep_constant_index_64_as2(i32 addrspace(2)* %p) {
+define ptr addrspace(2) @shrink_gep_constant_index_64_as2(ptr addrspace(2) %p) {
; CHECK-LABEL: @shrink_gep_constant_index_64_as2(
-; CHECK-NEXT: [[RET:%.*]] = getelementptr i32, i32 addrspace(2)* [[P:%.*]], i8 1
-; CHECK-NEXT: ret i32 addrspace(2)* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = getelementptr i32, ptr addrspace(2) [[P:%.*]], i8 1
+; CHECK-NEXT: ret ptr addrspace(2) [[RET]]
;
- %ret = getelementptr i32, i32 addrspace(2)* %p, i64 1
- ret i32 addrspace(2)* %ret
+ %ret = getelementptr i32, ptr addrspace(2) %p, i64 1
+ ret ptr addrspace(2) %ret
}
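; The index rewrite follows the datalayout at the top of this file: AS2
; pointers are 8 bits and AS3 pointers 16 bits wide, so wider GEP indices
; carry no extra information and are truncated, while narrower variable
; indices (as in @grow_gep_variable_index_8_as1 below) are sign-extended up
; to the index width. A hypothetical before/after, in our words:
;   getelementptr i32, ptr addrspace(2) %p, i64 1
;     ; becomes: getelementptr i32, ptr addrspace(2) %p, i8 1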
-define i32 addrspace(2)* @shrink_gep_constant_index_32_as2(i32 addrspace(2)* %p) {
+define ptr addrspace(2) @shrink_gep_constant_index_32_as2(ptr addrspace(2) %p) {
; CHECK-LABEL: @shrink_gep_constant_index_32_as2(
-; CHECK-NEXT: [[RET:%.*]] = getelementptr i32, i32 addrspace(2)* [[P:%.*]], i8 1
-; CHECK-NEXT: ret i32 addrspace(2)* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = getelementptr i32, ptr addrspace(2) [[P:%.*]], i8 1
+; CHECK-NEXT: ret ptr addrspace(2) [[RET]]
;
- %ret = getelementptr i32, i32 addrspace(2)* %p, i32 1
- ret i32 addrspace(2)* %ret
+ %ret = getelementptr i32, ptr addrspace(2) %p, i32 1
+ ret ptr addrspace(2) %ret
}
-define i32 addrspace(3)* @shrink_gep_constant_index_64_as3(i32 addrspace(3)* %p) {
+define ptr addrspace(3) @shrink_gep_constant_index_64_as3(ptr addrspace(3) %p) {
; CHECK-LABEL: @shrink_gep_constant_index_64_as3(
-; CHECK-NEXT: [[RET:%.*]] = getelementptr i32, i32 addrspace(3)* [[P:%.*]], i16 1
-; CHECK-NEXT: ret i32 addrspace(3)* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = getelementptr i32, ptr addrspace(3) [[P:%.*]], i16 1
+; CHECK-NEXT: ret ptr addrspace(3) [[RET]]
;
- %ret = getelementptr i32, i32 addrspace(3)* %p, i64 1
- ret i32 addrspace(3)* %ret
+ %ret = getelementptr i32, ptr addrspace(3) %p, i64 1
+ ret ptr addrspace(3) %ret
}
-define i32 addrspace(2)* @shrink_gep_variable_index_64_as2(i32 addrspace(2)* %p, i64 %idx) {
+define ptr addrspace(2) @shrink_gep_variable_index_64_as2(ptr addrspace(2) %p, i64 %idx) {
; CHECK-LABEL: @shrink_gep_variable_index_64_as2(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[IDX:%.*]] to i8
-; CHECK-NEXT: [[RET:%.*]] = getelementptr i32, i32 addrspace(2)* [[P:%.*]], i8 [[TMP1]]
-; CHECK-NEXT: ret i32 addrspace(2)* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = getelementptr i32, ptr addrspace(2) [[P:%.*]], i8 [[TMP1]]
+; CHECK-NEXT: ret ptr addrspace(2) [[RET]]
;
- %ret = getelementptr i32, i32 addrspace(2)* %p, i64 %idx
- ret i32 addrspace(2)* %ret
+ %ret = getelementptr i32, ptr addrspace(2) %p, i64 %idx
+ ret ptr addrspace(2) %ret
}
-define i32 addrspace(1)* @grow_gep_variable_index_8_as1(i32 addrspace(1)* %p, i8 %idx) {
+define ptr addrspace(1) @grow_gep_variable_index_8_as1(ptr addrspace(1) %p, i8 %idx) {
; CHECK-LABEL: @grow_gep_variable_index_8_as1(
; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[IDX:%.*]] to i64
-; CHECK-NEXT: [[RET:%.*]] = getelementptr i32, i32 addrspace(1)* [[P:%.*]], i64 [[TMP1]]
-; CHECK-NEXT: ret i32 addrspace(1)* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = getelementptr i32, ptr addrspace(1) [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr addrspace(1) [[RET]]
;
- %ret = getelementptr i32, i32 addrspace(1)* %p, i8 %idx
- ret i32 addrspace(1)* %ret
+ %ret = getelementptr i32, ptr addrspace(1) %p, i8 %idx
+ ret ptr addrspace(1) %ret
}
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
; Positive test - all uses are identical casts.
-define void @t0(i1 zeroext %c0, i1 zeroext %c1, i64* nocapture readonly %src) {
+define void @t0(i1 zeroext %c0, i1 zeroext %c1, ptr nocapture readonly %src) {
; CHECK-LABEL: @t0(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[DATA:%.*]] = load i64, i64* [[SRC:%.*]], align 8
+; CHECK-NEXT: [[DATA:%.*]] = load i64, ptr [[SRC:%.*]], align 8
; CHECK-NEXT: br i1 [[C0:%.*]], label [[BB3:%.*]], label [[BB7:%.*]]
; CHECK: bb3:
; CHECK-NEXT: br i1 [[C1:%.*]], label [[BB4:%.*]], label [[BB5:%.*]]
; CHECK-NEXT: tail call void @abort()
; CHECK-NEXT: unreachable
; CHECK: bb5:
-; CHECK-NEXT: [[PTR0:%.*]] = inttoptr i64 [[DATA]] to i32*
-; CHECK-NEXT: tail call void @sink0(i32* [[PTR0]])
+; CHECK-NEXT: [[PTR0:%.*]] = inttoptr i64 [[DATA]] to ptr
+; CHECK-NEXT: tail call void @sink0(ptr [[PTR0]])
; CHECK-NEXT: br label [[BB9:%.*]]
; CHECK: bb7:
-; CHECK-NEXT: [[PTR1:%.*]] = inttoptr i64 [[DATA]] to i32*
-; CHECK-NEXT: tail call void @sink1(i32* [[PTR1]])
+; CHECK-NEXT: [[PTR1:%.*]] = inttoptr i64 [[DATA]] to ptr
+; CHECK-NEXT: tail call void @sink1(ptr [[PTR1]])
; CHECK-NEXT: br label [[BB9]]
; CHECK: bb9:
; CHECK-NEXT: ret void
;
bb:
- %data = load i64, i64* %src, align 8
+ %data = load i64, ptr %src, align 8
br i1 %c0, label %bb3, label %bb7
bb3:
unreachable
bb5:
- %ptr0 = inttoptr i64 %data to i32*
- tail call void @sink0(i32* %ptr0)
+ %ptr0 = inttoptr i64 %data to ptr
+ tail call void @sink0(ptr %ptr0)
br label %bb9
bb7:
- %ptr1 = inttoptr i64 %data to i32*
- tail call void @sink1(i32* %ptr1)
+ %ptr1 = inttoptr i64 %data to ptr
+ tail call void @sink1(ptr %ptr1)
br label %bb9
bb9:
 ret void
}
; Negative test - all uses are casts, but non-identical ones.
-define void @n1(i1 zeroext %c0, i1 zeroext %c1, i64* nocapture readonly %src) {
+define void @n1(i1 zeroext %c0, i1 zeroext %c1, ptr nocapture readonly %src) {
; CHECK-LABEL: @n1(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[DATA:%.*]] = load i64, i64* [[SRC:%.*]], align 8
+; CHECK-NEXT: [[DATA:%.*]] = load i64, ptr [[SRC:%.*]], align 8
; CHECK-NEXT: br i1 [[C0:%.*]], label [[BB3:%.*]], label [[BB7:%.*]]
; CHECK: bb3:
; CHECK-NEXT: br i1 [[C1:%.*]], label [[BB4:%.*]], label [[BB5:%.*]]
; CHECK-NEXT: tail call void @abort()
; CHECK-NEXT: unreachable
; CHECK: bb5:
-; CHECK-NEXT: [[PTR0:%.*]] = inttoptr i64 [[DATA]] to i32*
-; CHECK-NEXT: tail call void @sink0(i32* [[PTR0]])
+; CHECK-NEXT: [[PTR0:%.*]] = inttoptr i64 [[DATA]] to ptr
+; CHECK-NEXT: tail call void @sink0(ptr [[PTR0]])
; CHECK-NEXT: br label [[BB9:%.*]]
; CHECK: bb7:
; CHECK-NEXT: [[VEC:%.*]] = bitcast i64 [[DATA]] to <2 x i32>
; CHECK-NEXT: ret void
;
bb:
- %data = load i64, i64* %src, align 8
+ %data = load i64, ptr %src, align 8
br i1 %c0, label %bb3, label %bb7
bb3:
unreachable
bb5:
- %ptr0 = inttoptr i64 %data to i32*
- tail call void @sink0(i32* %ptr0)
+ %ptr0 = inttoptr i64 %data to ptr
+ tail call void @sink0(ptr %ptr0)
br label %bb9
bb7:
 %vec = bitcast i64 %data to <2 x i32>
 tail call void @sink2(<2 x i32> %vec)
 br label %bb9
bb9:
 ret void
}
; Negative test - have non-cast users.
-define void @n2(i1 zeroext %c0, i1 zeroext %c1, i64* nocapture readonly %src) {
+define void @n2(i1 zeroext %c0, i1 zeroext %c1, ptr nocapture readonly %src) {
; CHECK-LABEL: @n2(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[DATA:%.*]] = load i64, i64* [[SRC:%.*]], align 8
+; CHECK-NEXT: [[DATA:%.*]] = load i64, ptr [[SRC:%.*]], align 8
; CHECK-NEXT: br i1 [[C0:%.*]], label [[BB3:%.*]], label [[BB7:%.*]]
; CHECK: bb3:
; CHECK-NEXT: br i1 [[C1:%.*]], label [[BB4:%.*]], label [[BB5:%.*]]
; CHECK-NEXT: tail call void @abort()
; CHECK-NEXT: unreachable
; CHECK: bb5:
-; CHECK-NEXT: [[PTR0:%.*]] = inttoptr i64 [[DATA]] to i32*
-; CHECK-NEXT: tail call void @sink0(i32* [[PTR0]])
+; CHECK-NEXT: [[PTR0:%.*]] = inttoptr i64 [[DATA]] to ptr
+; CHECK-NEXT: tail call void @sink0(ptr [[PTR0]])
; CHECK-NEXT: br label [[BB9:%.*]]
; CHECK: bb7:
; CHECK-NEXT: tail call void @sink3(i64 [[DATA]])
; CHECK-NEXT: ret void
;
bb:
- %data = load i64, i64* %src, align 8
+ %data = load i64, ptr %src, align 8
br i1 %c0, label %bb3, label %bb7
bb3:
unreachable
bb5:
- %ptr0 = inttoptr i64 %data to i32*
- tail call void @sink0(i32* %ptr0)
+ %ptr0 = inttoptr i64 %data to ptr
+ tail call void @sink0(ptr %ptr0)
br label %bb9
bb7:
 tail call void @sink3(i64 %data)
 br label %bb9
bb9:
 ret void
}
declare void @abort()
-declare void @sink0(i32*)
+declare void @sink0(ptr)
-declare void @sink1(i32*)
+declare void @sink1(ptr)
declare void @sink2(<2 x i32>)
declare void @sink3(i64)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S -data-layout="E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-a:8:16-n32:64" | FileCheck %s
-define void @PR35618(i64* %st1, double* %st2) {
+define void @PR35618(ptr %st1, ptr %st2) {
; CHECK-LABEL: @PR35618(
; CHECK-NEXT: [[Y1:%.*]] = alloca double, align 8
; CHECK-NEXT: [[Z1:%.*]] = alloca double, align 8
-; CHECK-NEXT: [[LD1:%.*]] = load double, double* [[Y1]], align 8
-; CHECK-NEXT: [[LD2:%.*]] = load double, double* [[Z1]], align 8
+; CHECK-NEXT: [[LD1:%.*]] = load double, ptr [[Y1]], align 8
+; CHECK-NEXT: [[LD2:%.*]] = load double, ptr [[Z1]], align 8
; CHECK-NEXT: [[TMP10:%.*]] = fcmp olt double [[LD1]], [[LD2]]
; CHECK-NEXT: [[TMP121:%.*]] = select i1 [[TMP10]], double [[LD1]], double [[LD2]]
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i64* [[ST1:%.*]] to double*
-; CHECK-NEXT: store double [[TMP121]], double* [[TMP1]], align 8
-; CHECK-NEXT: store double [[TMP121]], double* [[ST2:%.*]], align 8
+; CHECK-NEXT: store double [[TMP121]], ptr [[ST1:%.*]], align 8
+; CHECK-NEXT: store double [[TMP121]], ptr [[ST2:%.*]], align 8
; CHECK-NEXT: ret void
;
%y1 = alloca double
%z1 = alloca double
- %ld1 = load double, double* %y1
- %ld2 = load double, double* %z1
+ %ld1 = load double, ptr %y1
+ %ld2 = load double, ptr %z1
%tmp10 = fcmp olt double %ld1, %ld2
- %sel = select i1 %tmp10, double* %y1, double* %z1
- %tmp11 = bitcast double* %sel to i64*
- %tmp12 = load i64, i64* %tmp11
- store i64 %tmp12, i64* %st1
- %bc = bitcast double* %st2 to i64*
- store i64 %tmp12, i64* %bc
+ %sel = select i1 %tmp10, ptr %y1, ptr %z1
+ %tmp12 = load i64, ptr %sel
+ store i64 %tmp12, ptr %st1
+ store i64 %tmp12, ptr %st2
ret void
}
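; What happens above, in outline: loading through select(%tmp10, %y1, %z1) is
; rewritten as a select of the two loaded values, which is safe here because
; both allocas can be speculatively loaded; with opaque pointers the old
; double/i64 bitcast dance disappears, leaving one double select that feeds
; both stores, exactly as the updated CHECK lines show.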
; Check that instcombine leaves the thunk calls alone.
define i32 @call_thunk(i32 %x, i32 %y) {
- %r = call i32 bitcast (void (i32, ...)* @inc_first_arg_thunk to i32 (i32, i32)*)(i32 %x, i32 %y)
+ %r = call i32 @inc_first_arg_thunk(i32 %x, i32 %y)
ret i32 %r
}
; CHECK-LABEL: define i32 @call_thunk(i32 %x, i32 %y)
-; CHECK: %r = call i32 bitcast (void (i32, ...)* @inc_first_arg_thunk to i32 (i32, i32)*)(i32 %x, i32 %y)
+; CHECK: %r = call i32 @inc_first_arg_thunk(i32 %x, i32 %y)
; CHECK: ret i32 %r
define internal void @inc_first_arg_thunk(i32 %arg1, ...) #0 {
entry:
%inc = add i32 %arg1, 1
- musttail call void (i32, ...) bitcast (i32 (i32, i32)* @plus to void (i32, ...)*)(i32 %inc, ...)
+ musttail call void (i32, ...) @plus(i32 %inc, ...)
ret void
}
; CHECK-LABEL: define internal void @inc_first_arg_thunk(i32 %arg1, ...) #0
; CHECK: %inc = add i32 %arg1, 1
-; CHECK: musttail call void (i32, ...) bitcast (i32 (i32, i32)* @plus to void (i32, ...)*)(i32 %inc, ...)
+; CHECK: musttail call void (i32, ...) @plus(i32 %inc, ...)
; CHECK: ret void
define internal i32 @plus(i32 %x, i32 %y) {
]
sw.bb: ; preds = %entry
- store i32 90, i32* %retval, align 4
+ store i32 90, ptr %retval, align 4
br label %return
sw.bb1: ; preds = %entry
- store i32 91, i32* %retval, align 4
+ store i32 91, ptr %retval, align 4
br label %return
sw.bb2: ; preds = %entry
- store i32 92, i32* %retval, align 4
+ store i32 92, ptr %retval, align 4
br label %return
sw.epilog: ; preds = %entry
- store i32 113, i32* %retval, align 4
+ store i32 113, ptr %retval, align 4
br label %return
return: ; preds = %sw.epilog, %sw.bb2, %sw.bb1, %sw.bb
- %rval = load i32, i32* %retval, align 4
+ %rval = load i32, ptr %retval, align 4
ret i32 %rval
}
br label %1
; <label>:1: ; preds = %10, %0
- %2 = load volatile i32, i32* @njob, align 4
+ %2 = load volatile i32, ptr @njob, align 4
%3 = icmp ne i32 %2, 0
br i1 %3, label %4, label %11
]
; <label>:7: ; preds = %4
- store i32 6, i32* @a, align 4
+ store i32 6, ptr @a, align 4
br label %10
; <label>:8: ; preds = %4
- store i32 1, i32* @a, align 4
+ store i32 1, ptr @a, align 4
br label %10
; <label>:9: ; preds = %4
- store i32 2, i32* @a, align 4
+ store i32 2, ptr @a, align 4
br label %10
; <label>:10: ; preds = %13, %12, %11, %10, %9, %8, %7
; This is based on an 'any_of' loop construct.
; By narrowing the phi and logic op, we simplify away the zext and the final icmp.
-define i1 @searchArray1(i32 %needle, i32* %haystack) {
+define i1 @searchArray1(i32 %needle, ptr %haystack) {
; CHECK-LABEL: @searchArray1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK-NEXT: [[INDVAR:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[FOUND:%.*]] = phi i8 [ 0, [[ENTRY]] ], [ [[OR:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[INDVAR]] to i64
-; CHECK-NEXT: [[IDX:%.*]] = getelementptr i32, i32* [[HAYSTACK:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[LD:%.*]] = load i32, i32* [[IDX]], align 4
+; CHECK-NEXT: [[IDX:%.*]] = getelementptr i32, ptr [[HAYSTACK:%.*]], i64 [[TMP0]]
+; CHECK-NEXT: [[LD:%.*]] = load i32, ptr [[IDX]], align 4
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[LD]], [[NEEDLE:%.*]]
; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[CMP1]] to i8
; CHECK-NEXT: [[OR]] = or i8 [[FOUND]], [[ZEXT]]
loop:
%indvar = phi i32 [ 0, %entry ], [ %indvar.next, %loop ]
%found = phi i8 [ 0, %entry ], [ %or, %loop ]
- %idx = getelementptr i32, i32* %haystack, i32 %indvar
- %ld = load i32, i32* %idx
+ %idx = getelementptr i32, ptr %haystack, i32 %indvar
+ %ld = load i32, ptr %idx
%cmp1 = icmp eq i32 %ld, %needle
%zext = zext i1 %cmp1 to i8
%or = or i8 %found, %zext
; This is based on an 'all_of' loop construct.
; By narrowing the phi and logic op, we simplify away the zext and the final icmp.
-define i1 @searchArray2(i32 %hay, i32* %haystack) {
+define i1 @searchArray2(i32 %hay, ptr %haystack) {
; CHECK-LABEL: @searchArray2(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[FOUND:%.*]] = phi i8 [ 1, [[ENTRY]] ], [ [[AND:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[IDX:%.*]] = getelementptr i32, i32* [[HAYSTACK:%.*]], i64 [[INDVAR]]
-; CHECK-NEXT: [[LD:%.*]] = load i32, i32* [[IDX]], align 4
+; CHECK-NEXT: [[IDX:%.*]] = getelementptr i32, ptr [[HAYSTACK:%.*]], i64 [[INDVAR]]
+; CHECK-NEXT: [[LD:%.*]] = load i32, ptr [[IDX]], align 4
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[LD]], [[HAY:%.*]]
; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[CMP1]] to i8
; CHECK-NEXT: [[AND]] = and i8 [[FOUND]], [[ZEXT]]
loop:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
%found = phi i8 [ 1, %entry ], [ %and, %loop ]
- %idx = getelementptr i32, i32* %haystack, i64 %indvar
- %ld = load i32, i32* %idx
+ %idx = getelementptr i32, ptr %haystack, i64 %indvar
+ %ld = load i32, ptr %idx
%cmp1 = icmp eq i32 %ld, %hay
%zext = zext i1 %cmp1 to i8
%and = and i8 %found, %zext
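; Why narrowing works here (our reasoning, given that the loop exits are
; elided): the i8 accumulator only ever holds 0 or 1, so the recurrence can
; be carried as an i1 directly,
;   %found.i1 = or  i1 %found.i1.prev, %cmp1   ; any_of
;   %found.i1 = and i1 %found.i1.prev, %cmp1   ; all_of
; which makes the zext to i8 and the final icmp back to i1 unnecessary.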
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-declare void @use(i32 *)
+declare void @use(ptr)
define void @foo(i64 %X) {
; Currently we cannot handle expressions of the form Offset - X * Scale.
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[X:%.*]], 2
; CHECK-NEXT: [[TMP2:%.*]] = sub i64 24, [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = alloca i8, i64 [[TMP2]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to i32*
-; CHECK-NEXT: call void @use(i32* nonnull [[TMP4]])
+; CHECK-NEXT: call void @use(ptr nonnull [[TMP3]])
; CHECK-NEXT: ret void
;
%1 = mul nsw i64 %X, -4
%2 = add nsw i64 %1, 24
%3 = alloca i8, i64 %2, align 4
- %4 = bitcast i8* %3 to i32*
- call void @use(i32 *%4)
+ call void @use(ptr %3)
ret void
}
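; A worked check of the missed pattern (illustrative only): the byte count is
; 24 - 4*X = 4*(6 - X), i.e. exactly (6 - X) i32 elements, so in principle
; the allocation could become
;   %n = sub i64 6, %X
;   %a = alloca i32, i64 %n, align 4
; but, per the comment above, the Offset - X * Scale shape is not matched, so
; only the pointer cast cleanup happens.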
; operator new(size_t = unsigned int)
-declare i8* @_Znwj(%size_t)
+declare ptr @_Znwj(%size_t)
; operator new[](size_t = unsigned int)
-declare i8* @_Znaj(%size_t)
+declare ptr @_Znaj(%size_t)
; operator new(size_t = unsigned int, std::align_val_t)
-declare i8* @_ZnwjSt11align_val_t(%size_t, %size_t)
+declare ptr @_ZnwjSt11align_val_t(%size_t, %size_t)
; operator new[](size_t = unsigned int, std::align_val_t)
-declare i8* @_ZnajSt11align_val_t(%size_t, %size_t)
+declare ptr @_ZnajSt11align_val_t(%size_t, %size_t)
; operator new(size_t = unsigned int, std::align_val_t, const std::nothrow_t&)
-declare i8* @_ZnwjSt11align_val_tRKSt9nothrow_t(%size_t, %size_t, %nothrow_t*)
+declare ptr @_ZnwjSt11align_val_tRKSt9nothrow_t(%size_t, %size_t, ptr)
; operator new[](size_t = unsigned int, std::align_val_t, const std::nothrow_t&)
-declare i8* @_ZnajSt11align_val_tRKSt9nothrow_t(%size_t, %size_t, %nothrow_t*)
+declare ptr @_ZnajSt11align_val_tRKSt9nothrow_t(%size_t, %size_t, ptr)
-; operator delete(void*, size_t = unsigned int)
-declare void @_ZdlPvj(i8*, %size_t)
+; operator delete(ptr, size_t = unsigned int)
+declare void @_ZdlPvj(ptr, %size_t)
-; operator delete[](void*, size_t = unsigned int)
-declare void @_ZdaPvj(i8*, %size_t)
+; operator delete[](ptr, size_t = unsigned int)
+declare void @_ZdaPvj(ptr, %size_t)
-; operator delete(void*, std::align_val_t)
-declare void @_ZdlPvSt11align_val_t(i8*, %align_val_t)
+; operator delete(ptr, std::align_val_t)
+declare void @_ZdlPvSt11align_val_t(ptr, %align_val_t)
-; operator delete[](void*, std::align_val_t)
-declare void @_ZdaPvSt11align_val_t(i8*, %align_val_t)
+; operator delete[](ptr, std::align_val_t)
+declare void @_ZdaPvSt11align_val_t(ptr, %align_val_t)
-; operator delete(void*, size_t = unsigned int, std::align_val_t)
-declare void @_ZdlPvjSt11align_val_t(i8*, %size_t, %align_val_t)
+; operator delete(ptr, size_t = unsigned int, std::align_val_t)
+declare void @_ZdlPvjSt11align_val_t(ptr, %size_t, %align_val_t)
-; operator delete[](void*, size_t = unsigned int, std::align_val_t)
-declare void @_ZdaPvjSt11align_val_t(i8*, %size_t, %align_val_t)
+; operator delete[](ptr, size_t = unsigned int, std::align_val_t)
+declare void @_ZdaPvjSt11align_val_t(ptr, %size_t, %align_val_t)
-; operator delete(void*, std::align_val_t, const std::nothrow_t&)
-declare void @_ZdlPvSt11align_val_tRKSt9nothrow_t(i8*, %align_val_t, %nothrow_t*)
+; operator delete(ptr, std::align_val_t, const std::nothrow_t&)
+declare void @_ZdlPvSt11align_val_tRKSt9nothrow_t(ptr, %align_val_t, ptr)
-; operator delete[](void*, std::align_val_t, const std::nothrow_t&)
-declare void @_ZdaPvSt11align_val_tRKSt9nothrow_t(i8*, %align_val_t, %nothrow_t*)
+; operator delete[](ptr, std::align_val_t, const std::nothrow_t&)
+declare void @_ZdaPvSt11align_val_tRKSt9nothrow_t(ptr, %align_val_t, ptr)
declare void @llvm.assume(i1)
;
%nt = alloca %nothrow_t
- %nwj = call i8* @_Znwj(%size_t 32)
- call void @_ZdlPvj(i8* %nwj, %size_t 32)
+ %nwj = call ptr @_Znwj(%size_t 32)
+ call void @_ZdlPvj(ptr %nwj, %size_t 32)
- %naj = call i8* @_Znaj(%size_t 32)
- call void @_ZdaPvj(i8* %naj, %size_t 32)
+ %naj = call ptr @_Znaj(%size_t 32)
+ call void @_ZdaPvj(ptr %naj, %size_t 32)
- %nwja = call i8* @_ZnwjSt11align_val_t(%size_t 32, %size_t 8)
- call void @_ZdlPvSt11align_val_t(i8* %nwja, %size_t 8)
+ %nwja = call ptr @_ZnwjSt11align_val_t(%size_t 32, %size_t 8)
+ call void @_ZdlPvSt11align_val_t(ptr %nwja, %size_t 8)
- %naja = call i8* @_ZnajSt11align_val_t(%size_t 32, %size_t 8)
- call void @_ZdaPvSt11align_val_t(i8* %naja, i32 8)
+ %naja = call ptr @_ZnajSt11align_val_t(%size_t 32, %size_t 8)
+ call void @_ZdaPvSt11align_val_t(ptr %naja, i32 8)
- %nwjat = call i8* @_ZnwjSt11align_val_tRKSt9nothrow_t(%size_t 32, %size_t 8, %nothrow_t* %nt)
- call void @_ZdlPvSt11align_val_tRKSt9nothrow_t(i8* %nwjat, %size_t 8, %nothrow_t* %nt)
+ %nwjat = call ptr @_ZnwjSt11align_val_tRKSt9nothrow_t(%size_t 32, %size_t 8, ptr %nt)
+ call void @_ZdlPvSt11align_val_tRKSt9nothrow_t(ptr %nwjat, %size_t 8, ptr %nt)
- %najat = call i8* @_ZnajSt11align_val_tRKSt9nothrow_t(%size_t 32, %size_t 8, %nothrow_t* %nt)
- call void @_ZdaPvSt11align_val_tRKSt9nothrow_t(i8* %najat, i32 8, %nothrow_t* %nt)
+ %najat = call ptr @_ZnajSt11align_val_tRKSt9nothrow_t(%size_t 32, %size_t 8, ptr %nt)
+ call void @_ZdaPvSt11align_val_tRKSt9nothrow_t(ptr %najat, i32 8, ptr %nt)
- %nwja2 = call i8* @_ZnwjSt11align_val_t(%size_t 32, %size_t 8)
- call void @_ZdlPvjSt11align_val_t(i8* %nwja2, %size_t 32, %size_t 8)
+ %nwja2 = call ptr @_ZnwjSt11align_val_t(%size_t 32, %size_t 8)
+ call void @_ZdlPvjSt11align_val_t(ptr %nwja2, %size_t 32, %size_t 8)
- %naja2 = call i8* @_ZnajSt11align_val_t(%size_t 32, %size_t 8)
- call void @_ZdaPvjSt11align_val_t(i8* %naja2, %size_t 32, %size_t 8)
+ %naja2 = call ptr @_ZnajSt11align_val_t(%size_t 32, %size_t 8)
+ call void @_ZdaPvjSt11align_val_t(ptr %naja2, %size_t 32, %size_t 8)
; Check that the alignment assume does not prevent the removal.
- %nwa3 = call i8* @_ZnajSt11align_val_t(%size_t 32, %size_t 16)
+ %nwa3 = call ptr @_ZnajSt11align_val_t(%size_t 32, %size_t 16)
- call void @llvm.assume(i1 true) [ "align"(i8* %nwa3, i32 16) ]
+ call void @llvm.assume(i1 true) [ "align"(ptr %nwa3, i32 16) ]
- call void @_ZdaPvjSt11align_val_t(i8* %nwa3, %size_t 32, %size_t 16)
+ call void @_ZdaPvjSt11align_val_t(ptr %nwa3, %size_t 32, %size_t 16)
ret void
}
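; Note on the last pair above: the "align" operand bundle on @llvm.assume
; references %nwa3, but an assume is not a real use of the allocation, so
; (as the comment says) it does not keep the new/delete pair alive; the
; assume's bundle operand is dropped together with the allocation.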
; PR1201
; 'if (p) delete p;' cannot result in a call to 'operator delete(0)'.
-define void @test6a(i8* %foo) minsize {
+define void @test6a(ptr %foo) minsize {
; CHECK-LABEL: @test6a(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8* [[FOO:%.*]], null
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq ptr [[FOO:%.*]], null
; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: tail call void @_ZdlPv(i8* [[FOO]]) #[[ATTR5:[0-9]+]]
+; CHECK-NEXT: tail call void @_ZdlPv(ptr [[FOO]]) #[[ATTR5:[0-9]+]]
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
; CHECK-NEXT: ret void
;
entry:
- %tobool = icmp eq i8* %foo, null
+ %tobool = icmp eq ptr %foo, null
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
- tail call void @_ZdlPv(i8* %foo) builtin
+ tail call void @_ZdlPv(ptr %foo) builtin
br label %if.end
if.end: ; preds = %entry, %if.then
ret void
}
-declare i8* @_ZnwmRKSt9nothrow_t(i64, i8*) nobuiltin
-declare void @_ZdlPvRKSt9nothrow_t(i8*, i8*) nobuiltin
+declare ptr @_ZnwmRKSt9nothrow_t(i64, ptr) nobuiltin
+declare void @_ZdlPvRKSt9nothrow_t(ptr, ptr) nobuiltin
declare i32 @__gxx_personality_v0(...)
-declare void @_ZN1AC2Ev(i8* %this)
+declare void @_ZN1AC2Ev(ptr %this)
-define void @test7() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @test7() personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: @test7(
; CHECK-NEXT: entry:
-; CHECK-NEXT: invoke void @_ZN1AC2Ev(i8* undef)
+; CHECK-NEXT: invoke void @_ZN1AC2Ev(ptr undef)
; CHECK-NEXT: to label [[DOTNOEXC_I:%.*]] unwind label [[LPAD_I:%.*]]
; CHECK: .noexc.i:
; CHECK-NEXT: unreachable
; CHECK: lpad.i:
-; CHECK-NEXT: [[TMP0:%.*]] = landingpad { i8*, i32 }
+; CHECK-NEXT: [[TMP0:%.*]] = landingpad { ptr, i32 }
; CHECK-NEXT: cleanup
-; CHECK-NEXT: resume { i8*, i32 } [[TMP0]]
+; CHECK-NEXT: resume { ptr, i32 } [[TMP0]]
;
entry:
%nt = alloca i8
- %call.i = tail call i8* @_ZnwmRKSt9nothrow_t(i64 1, i8* %nt) builtin nounwind
- invoke void @_ZN1AC2Ev(i8* undef)
+ %call.i = tail call ptr @_ZnwmRKSt9nothrow_t(i64 1, ptr %nt) builtin nounwind
+ invoke void @_ZN1AC2Ev(ptr undef)
to label %.noexc.i unwind label %lpad.i
.noexc.i: ; preds = %entry
unreachable
lpad.i: ; preds = %entry
- %0 = landingpad { i8*, i32 } cleanup
- call void @_ZdlPvRKSt9nothrow_t(i8* %call.i, i8* %nt) builtin nounwind
- resume { i8*, i32 } %0
+ %0 = landingpad { ptr, i32 } cleanup
+ call void @_ZdlPvRKSt9nothrow_t(ptr %call.i, ptr %nt) builtin nounwind
+ resume { ptr, i32 } %0
}
-declare i8* @_Znwm(i64) nobuiltin
-define i8* @_Znwj(i32 %n) nobuiltin {
+declare ptr @_Znwm(i64) nobuiltin
+define ptr @_Znwj(i32 %n) nobuiltin {
; CHECK-LABEL: @_Znwj(
; CHECK-NEXT: [[Z:%.*]] = zext i32 [[N:%.*]] to i64
-; CHECK-NEXT: [[M:%.*]] = call i8* @_Znwm(i64 [[Z]])
-; CHECK-NEXT: ret i8* [[M]]
+; CHECK-NEXT: [[M:%.*]] = call ptr @_Znwm(i64 [[Z]])
+; CHECK-NEXT: ret ptr [[M]]
;
%z = zext i32 %n to i64
- %m = call i8* @_Znwm(i64 %z)
- ret i8* %m
+ %m = call ptr @_Znwm(i64 %z)
+ ret ptr %m
}
-declare i8* @_Znam(i64) nobuiltin
-declare i8* @_Znaj(i32) nobuiltin
-declare void @_ZdlPv(i8*) nobuiltin
-declare void @_ZdaPv(i8*) nobuiltin
+declare ptr @_Znam(i64) nobuiltin
+declare ptr @_Znaj(i32) nobuiltin
+declare void @_ZdlPv(ptr) nobuiltin
+declare void @_ZdaPv(ptr) nobuiltin
-define linkonce void @_ZdlPvm(i8* %p, i64) nobuiltin {
+define linkonce void @_ZdlPvm(ptr %p, i64) nobuiltin {
; CHECK-LABEL: @_ZdlPvm(
-; CHECK-NEXT: call void @_ZdlPv(i8* [[P:%.*]])
+; CHECK-NEXT: call void @_ZdlPv(ptr [[P:%.*]])
; CHECK-NEXT: ret void
;
- call void @_ZdlPv(i8* %p)
+ call void @_ZdlPv(ptr %p)
ret void
}
-define linkonce void @_ZdlPvj(i8* %p, i32) nobuiltin {
+define linkonce void @_ZdlPvj(ptr %p, i32) nobuiltin {
; CHECK-LABEL: @_ZdlPvj(
-; CHECK-NEXT: call void @_ZdlPv(i8* [[P:%.*]])
+; CHECK-NEXT: call void @_ZdlPv(ptr [[P:%.*]])
; CHECK-NEXT: ret void
;
- call void @_ZdlPv(i8* %p)
+ call void @_ZdlPv(ptr %p)
ret void
}
-define linkonce void @_ZdaPvm(i8* %p, i64) nobuiltin {
+define linkonce void @_ZdaPvm(ptr %p, i64) nobuiltin {
; CHECK-LABEL: @_ZdaPvm(
-; CHECK-NEXT: call void @_ZdaPv(i8* [[P:%.*]])
+; CHECK-NEXT: call void @_ZdaPv(ptr [[P:%.*]])
; CHECK-NEXT: ret void
;
- call void @_ZdaPv(i8* %p)
+ call void @_ZdaPv(ptr %p)
ret void
}
-define linkonce void @_ZdaPvj(i8* %p, i32) nobuiltin {
+define linkonce void @_ZdaPvj(ptr %p, i32) nobuiltin {
; CHECK-LABEL: @_ZdaPvj(
-; CHECK-NEXT: call void @_ZdaPv(i8* [[P:%.*]])
+; CHECK-NEXT: call void @_ZdaPv(ptr [[P:%.*]])
; CHECK-NEXT: ret void
;
- call void @_ZdaPv(i8* %p)
+ call void @_ZdaPv(ptr %p)
ret void
}
; new(size_t, align_val_t)
-declare i8* @_ZnwmSt11align_val_t(i64, i64) nobuiltin
+declare ptr @_ZnwmSt11align_val_t(i64, i64) nobuiltin
; new[](size_t, align_val_t)
-declare i8* @_ZnamSt11align_val_t(i64, i64) nobuiltin
+declare ptr @_ZnamSt11align_val_t(i64, i64) nobuiltin
; new(size_t, align_val_t, nothrow)
-declare i8* @_ZnwmSt11align_val_tRKSt9nothrow_t(i64, i64, i8*) nobuiltin
+declare ptr @_ZnwmSt11align_val_tRKSt9nothrow_t(i64, i64, ptr) nobuiltin
; new[](size_t, align_val_t, nothrow)
-declare i8* @_ZnamSt11align_val_tRKSt9nothrow_t(i64, i64, i8*) nobuiltin
-; delete(void*, align_val_t)
-declare void @_ZdlPvSt11align_val_t(i8*, i64) nobuiltin
-; delete[](void*, align_val_t)
-declare void @_ZdaPvSt11align_val_t(i8*, i64) nobuiltin
-; delete(void*, align_val_t, nothrow)
-declare void @_ZdlPvSt11align_val_tRKSt9nothrow_t(i8*, i64, i8*) nobuiltin
-; delete[](void*, align_val_t, nothrow)
-declare void @_ZdaPvSt11align_val_tRKSt9nothrow_t(i8*, i64, i8*) nobuiltin
-; delete(void*, unsigned long, align_val_t)
-declare void @_ZdlPvmSt11align_val_t(i8*, i64, i64) nobuiltin
-; delete[](void*, unsigned long, align_val_t)
-declare void @_ZdaPvmSt11align_val_t(i8*, i64, i64) nobuiltin
+declare ptr @_ZnamSt11align_val_tRKSt9nothrow_t(i64, i64, ptr) nobuiltin
+; delete(ptr, align_val_t)
+declare void @_ZdlPvSt11align_val_t(ptr, i64) nobuiltin
+; delete[](ptr, align_val_t)
+declare void @_ZdaPvSt11align_val_t(ptr, i64) nobuiltin
+; delete(ptr, align_val_t, nothrow)
+declare void @_ZdlPvSt11align_val_tRKSt9nothrow_t(ptr, i64, ptr) nobuiltin
+; delete[](ptr, align_val_t, nothrow)
+declare void @_ZdaPvSt11align_val_tRKSt9nothrow_t(ptr, i64, ptr) nobuiltin
+; delete(ptr, unsigned long, align_val_t)
+declare void @_ZdlPvmSt11align_val_t(ptr, i64, i64) nobuiltin
+; delete[](ptr, unsigned long, align_val_t)
+declare void @_ZdaPvmSt11align_val_t(ptr, i64, i64) nobuiltin
declare void @llvm.assume(i1)
; CHECK-NEXT: ret void
;
%nt = alloca i8
- %nw = call i8* @_Znwm(i64 32) builtin
- call void @_ZdlPv(i8* %nw) builtin
- %na = call i8* @_Znam(i64 32) builtin
- call void @_ZdaPv(i8* %na) builtin
- %nwm = call i8* @_Znwm(i64 32) builtin
- call void @_ZdlPvm(i8* %nwm, i64 32) builtin
- %nam = call i8* @_Znam(i64 32) builtin
- call void @_ZdaPvm(i8* %nam, i64 32) builtin
- %nwa = call i8* @_ZnwmSt11align_val_t(i64 32, i64 8) builtin
- call void @_ZdlPvSt11align_val_t(i8* %nwa, i64 8) builtin
- %naa = call i8* @_ZnamSt11align_val_t(i64 32, i64 8) builtin
- call void @_ZdaPvSt11align_val_t(i8* %naa, i64 8) builtin
- %nwat = call i8* @_ZnwmSt11align_val_tRKSt9nothrow_t(i64 32, i64 8, i8* %nt) builtin
- call void @_ZdlPvSt11align_val_tRKSt9nothrow_t(i8* %nwat, i64 8, i8* %nt) builtin
- %naat = call i8* @_ZnamSt11align_val_tRKSt9nothrow_t(i64 32, i64 8, i8* %nt) builtin
- call void @_ZdaPvSt11align_val_tRKSt9nothrow_t(i8* %naat, i64 8, i8* %nt) builtin
- %nwa2 = call i8* @_ZnwmSt11align_val_t(i64 32, i64 8) builtin
- call void @_ZdlPvmSt11align_val_t(i8* %nwa2, i64 32, i64 8) builtin
- %naa2 = call i8* @_ZnamSt11align_val_t(i64 32, i64 8) builtin
- call void @_ZdaPvmSt11align_val_t(i8* %naa2, i64 32, i64 8) builtin
+ %nw = call ptr @_Znwm(i64 32) builtin
+ call void @_ZdlPv(ptr %nw) builtin
+ %na = call ptr @_Znam(i64 32) builtin
+ call void @_ZdaPv(ptr %na) builtin
+ %nwm = call ptr @_Znwm(i64 32) builtin
+ call void @_ZdlPvm(ptr %nwm, i64 32) builtin
+ %nam = call ptr @_Znam(i64 32) builtin
+ call void @_ZdaPvm(ptr %nam, i64 32) builtin
+ %nwa = call ptr @_ZnwmSt11align_val_t(i64 32, i64 8) builtin
+ call void @_ZdlPvSt11align_val_t(ptr %nwa, i64 8) builtin
+ %naa = call ptr @_ZnamSt11align_val_t(i64 32, i64 8) builtin
+ call void @_ZdaPvSt11align_val_t(ptr %naa, i64 8) builtin
+ %nwat = call ptr @_ZnwmSt11align_val_tRKSt9nothrow_t(i64 32, i64 8, ptr %nt) builtin
+ call void @_ZdlPvSt11align_val_tRKSt9nothrow_t(ptr %nwat, i64 8, ptr %nt) builtin
+ %naat = call ptr @_ZnamSt11align_val_tRKSt9nothrow_t(i64 32, i64 8, ptr %nt) builtin
+ call void @_ZdaPvSt11align_val_tRKSt9nothrow_t(ptr %naat, i64 8, ptr %nt) builtin
+ %nwa2 = call ptr @_ZnwmSt11align_val_t(i64 32, i64 8) builtin
+ call void @_ZdlPvmSt11align_val_t(ptr %nwa2, i64 32, i64 8) builtin
+ %naa2 = call ptr @_ZnamSt11align_val_t(i64 32, i64 8) builtin
+ call void @_ZdaPvmSt11align_val_t(ptr %naa2, i64 32, i64 8) builtin
; Check that the alignment assume does not prevent the removal.
- %nwa3 = call i8* @_ZnwmSt11align_val_t(i64 32, i64 16) builtin
- call void @llvm.assume(i1 true) [ "align"(i8* %nwa3, i64 16) ]
- call void @_ZdlPvmSt11align_val_t(i8* %nwa3, i64 32, i64 16) builtin
+ %nwa3 = call ptr @_ZnwmSt11align_val_t(i64 32, i64 16) builtin
+ call void @llvm.assume(i1 true) [ "align"(ptr %nwa3, i64 16) ]
+ call void @_ZdlPvmSt11align_val_t(ptr %nwa3, i64 32, i64 16) builtin
ret void
}
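; The pattern throughout this function, sketched in minimal form (our
; reduction, not a test in the file):
;   %p = call ptr @_Znwm(i64 8) builtin
;   call void @_ZdlPv(ptr %p) builtin   ; %p otherwise unused => pair erased
; Because every operator here is declared nobuiltin, the per-call-site
; 'builtin' attribute is what lets instcombine treat the calls as the
; standard allocator and delete the unused allocation together with its
; matching deallocation.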
define void @test10() {
; CHECK-LABEL: @test10(
-; CHECK-NEXT: call void @_ZdlPv(i8* null)
+; CHECK-NEXT: call void @_ZdlPv(ptr null)
; CHECK-NEXT: ret void
;
- call void @_ZdlPv(i8* null)
+ call void @_ZdlPv(ptr null)
ret void
}
define void @test11() {
; CHECK-LABEL: @test11(
-; CHECK-NEXT: [[CALL:%.*]] = call dereferenceable_or_null(8) i8* @_Znwm(i64 8) #[[ATTR5]]
-; CHECK-NEXT: call void @_ZdlPv(i8* [[CALL]])
+; CHECK-NEXT: [[CALL:%.*]] = call dereferenceable_or_null(8) ptr @_Znwm(i64 8) #[[ATTR5]]
+; CHECK-NEXT: call void @_ZdlPv(ptr [[CALL]])
; CHECK-NEXT: ret void
;
- %call = call i8* @_Znwm(i64 8) builtin
- call void @_ZdlPv(i8* %call)
+ %call = call ptr @_Znwm(i64 8) builtin
+ call void @_ZdlPv(ptr %call)
ret void
}
-declare i8* @llvm.launder.invariant.group(i8*)
-declare i8* @llvm.strip.invariant.group(i8*)
+declare ptr @llvm.launder.invariant.group(ptr)
+declare ptr @llvm.strip.invariant.group(ptr)
define void @test17() {
; CHECK-LABEL: @test17(
; CHECK-NEXT: ret void
;
- %nw1 = call i8* @_Znwm(i64 32) builtin
- %nw2 = call i8* @llvm.launder.invariant.group(i8* %nw1)
- %nw3 = call i8* @llvm.strip.invariant.group(i8* %nw2)
- store i8 1, i8* %nw3
- call void @_ZdlPv(i8* %nw2) builtin
+ %nw1 = call ptr @_Znwm(i64 32) builtin
+ %nw2 = call ptr @llvm.launder.invariant.group(ptr %nw1)
+ %nw3 = call ptr @llvm.strip.invariant.group(ptr %nw2)
+ store i8 1, ptr %nw3
+ call void @_ZdlPv(ptr %nw2) builtin
ret void
}
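; Note: the launder/strip invariant.group intrinsics only repackage the
; pointer, and the store targets memory that is freed immediately afterwards,
; so (assuming the usual dead-allocation heuristics) the allocation, both
; intrinsics, the store and the delete are all removable, as the empty CHECK
; body expects.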
; REQUIRES: x86-registered-target
; PR1201
-declare noalias i8* @"\01??2@YAPEAX_K@Z"(i64) nobuiltin
-declare void @"\01??3@YAXPEAX@Z"(i8*) nobuiltin
+declare noalias ptr @"\01??2@YAPEAX_K@Z"(i64) nobuiltin
+declare void @"\01??3@YAXPEAX@Z"(ptr) nobuiltin
define void @test9() {
; CHECK-LABEL: @test9(
; CHECK-NEXT: ret void
;
- %new_long_long = call noalias i8* @"\01??2@YAPEAX_K@Z"(i64 32) builtin
- call void @"\01??3@YAXPEAX@Z"(i8* %new_long_long) builtin
+ %new_long_long = call noalias ptr @"\01??2@YAPEAX_K@Z"(i64 32) builtin
+ call void @"\01??3@YAXPEAX@Z"(ptr %new_long_long) builtin
ret void
}
; CHECK: ret
define double @mysqrt(double %x) nounwind {
entry:
- %x_addr = alloca double ; <double*> [#uses=2]
- %retval = alloca double, align 8 ; <double*> [#uses=2]
- %0 = alloca double, align 8 ; <double*> [#uses=2]
+ %x_addr = alloca double ; <ptr> [#uses=2]
+ %retval = alloca double, align 8 ; <ptr> [#uses=2]
+ %0 = alloca double, align 8 ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store double %x, double* %x_addr
- %1 = load double, double* %x_addr, align 8 ; <double> [#uses=1]
+ store double %x, ptr %x_addr
+ %1 = load double, ptr %x_addr, align 8 ; <double> [#uses=1]
%2 = call double @fabs(double %1) nounwind readnone ; <double> [#uses=1]
%3 = call double @sqrt(double %2) nounwind readonly ; <double> [#uses=1]
%4 = fadd double %3, 0.000000e+00 ; <double> [#uses=1]
- store double %4, double* %0, align 8
- %5 = load double, double* %0, align 8 ; <double> [#uses=1]
- store double %5, double* %retval, align 8
+ store double %4, ptr %0, align 8
+ %5 = load double, ptr %0, align 8 ; <double> [#uses=1]
+ store double %5, ptr %retval, align 8
br label %return
return: ; preds = %entry
- %retval1 = load double, double* %retval ; <double> [#uses=1]
+ %retval1 = load double, ptr %retval ; <double> [#uses=1]
ret double %retval1
}
unreachable
}
-define dso_local void @test() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define dso_local void @test() personality ptr @__gxx_personality_v0 {
entry:
; CHECK: define dso_local void @test()
; CHECK-NEXT: entry:
ret void
lpad:
- %0 = landingpad { i8*, i32 }
+ %0 = landingpad { ptr, i32 }
cleanup
- call void (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.2, i64 0, i64 0))
- resume { i8*, i32 } %0
+ call void (ptr, ...) @printf(ptr @.str.2)
+ resume { ptr, i32 } %0
}
declare dso_local i32 @__gxx_personality_v0(...)
-declare dso_local void @printf(i8*, ...)
+declare dso_local void @printf(ptr, ...)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
-define void @test01(i8* %ptr0, i8* %ptr1) {
+define void @test01(ptr %ptr0, ptr %ptr1) {
; CHECK-LABEL: @test01(
-; CHECK-NEXT: store i8 42, i8* [[PTR0:%.*]], align 1
-; CHECK-NEXT: store i8 43, i8* [[PTR1:%.*]], align 1
+; CHECK-NEXT: store i8 42, ptr [[PTR0:%.*]], align 1
+; CHECK-NEXT: store i8 43, ptr [[PTR1:%.*]], align 1
; CHECK-NEXT: ret void
;
call void @llvm.experimental.noalias.scope.decl(metadata !0)
- store i8 42, i8* %ptr0
- store i8 43, i8* %ptr1
+ store i8 42, ptr %ptr0
+ store i8 43, ptr %ptr1
ret void
}
-define void @test02_keep(i8* %ptr0, i8* %ptr1) {
+define void @test02_keep(ptr %ptr0, ptr %ptr1) {
; CHECK-LABEL: @test02_keep(
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata !0)
-; CHECK-NEXT: store i8 42, i8* [[PTR0:%.*]], align 1, !alias.scope !0
-; CHECK-NEXT: store i8 43, i8* [[PTR1:%.*]], align 1, !noalias !3
+; CHECK-NEXT: store i8 42, ptr [[PTR0:%.*]], align 1, !alias.scope !0
+; CHECK-NEXT: store i8 43, ptr [[PTR1:%.*]], align 1, !noalias !3
; CHECK-NEXT: ret void
;
call void @llvm.experimental.noalias.scope.decl(metadata !0)
- store i8 42, i8* %ptr0, !alias.scope !0
- store i8 43, i8* %ptr1, !noalias !5
+ store i8 42, ptr %ptr0, !alias.scope !0
+ store i8 43, ptr %ptr1, !noalias !5
ret void
}
-define void @test03(i8* %ptr0, i8* %ptr1) {
+define void @test03(ptr %ptr0, ptr %ptr1) {
; CHECK-LABEL: @test03(
-; CHECK-NEXT: store i8 42, i8* [[PTR0:%.*]], align 1, !alias.scope !5
-; CHECK-NEXT: store i8 43, i8* [[PTR1:%.*]], align 1, !noalias !3
+; CHECK-NEXT: store i8 42, ptr [[PTR0:%.*]], align 1, !alias.scope !5
+; CHECK-NEXT: store i8 43, ptr [[PTR1:%.*]], align 1, !noalias !3
; CHECK-NEXT: ret void
;
call void @llvm.experimental.noalias.scope.decl(metadata !0)
- store i8 42, i8* %ptr0, !alias.scope !3
- store i8 43, i8* %ptr1, !noalias !5
+ store i8 42, ptr %ptr0, !alias.scope !3
+ store i8 43, ptr %ptr1, !noalias !5
ret void
}
-define void @test04_keep(i8* %ptr0, i8* %ptr1) {
+define void @test04_keep(ptr %ptr0, ptr %ptr1) {
; CHECK-LABEL: @test04_keep(
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata !0)
-; CHECK-NEXT: store i8 42, i8* [[PTR0:%.*]], align 1, !alias.scope !3
-; CHECK-NEXT: store i8 43, i8* [[PTR1:%.*]], align 1, !noalias !3
+; CHECK-NEXT: store i8 42, ptr [[PTR0:%.*]], align 1, !alias.scope !3
+; CHECK-NEXT: store i8 43, ptr [[PTR1:%.*]], align 1, !noalias !3
; CHECK-NEXT: ret void
;
call void @llvm.experimental.noalias.scope.decl(metadata !0)
- store i8 42, i8* %ptr0, !alias.scope !5
- store i8 43, i8* %ptr1, !noalias !5
+ store i8 42, ptr %ptr0, !alias.scope !5
+ store i8 43, ptr %ptr1, !noalias !5
ret void
}
-define void @test05_keep(i8* %ptr0, i8* %ptr1) {
+define void @test05_keep(ptr %ptr0, ptr %ptr1) {
; CHECK-LABEL: @test05_keep(
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata !0)
-; CHECK-NEXT: store i8 42, i8* [[PTR0:%.*]], align 1, !alias.scope !3
-; CHECK-NEXT: store i8 43, i8* [[PTR1:%.*]], align 1, !noalias !0
+; CHECK-NEXT: store i8 42, ptr [[PTR0:%.*]], align 1, !alias.scope !3
+; CHECK-NEXT: store i8 43, ptr [[PTR1:%.*]], align 1, !noalias !0
; CHECK-NEXT: ret void
;
call void @llvm.experimental.noalias.scope.decl(metadata !0)
- store i8 42, i8* %ptr0, !alias.scope !5
- store i8 43, i8* %ptr1, !noalias !0
+ store i8 42, ptr %ptr0, !alias.scope !5
+ store i8 43, ptr %ptr1, !noalias !0
ret void
}
-define void @test06(i8* %ptr0, i8* %ptr1) {
+define void @test06(ptr %ptr0, ptr %ptr1) {
; CHECK-LABEL: @test06(
-; CHECK-NEXT: store i8 42, i8* [[PTR0:%.*]], align 1, !alias.scope !3
-; CHECK-NEXT: store i8 43, i8* [[PTR1:%.*]], align 1, !noalias !5
+; CHECK-NEXT: store i8 42, ptr [[PTR0:%.*]], align 1, !alias.scope !3
+; CHECK-NEXT: store i8 43, ptr [[PTR1:%.*]], align 1, !noalias !5
; CHECK-NEXT: ret void
;
call void @llvm.experimental.noalias.scope.decl(metadata !0)
- store i8 42, i8* %ptr0, !alias.scope !5
- store i8 43, i8* %ptr1, !noalias !3
+ store i8 42, ptr %ptr0, !alias.scope !5
+ store i8 43, ptr %ptr1, !noalias !3
ret void
}
-define void @test07(i8* %ptr0, i8* %ptr1) {
+define void @test07(ptr %ptr0, ptr %ptr1) {
; CHECK-LABEL: @test07(
-; CHECK-NEXT: store i8 42, i8* [[PTR0:%.*]], align 1, !alias.scope !0
-; CHECK-NEXT: store i8 43, i8* [[PTR1:%.*]], align 1, !noalias !5
+; CHECK-NEXT: store i8 42, ptr [[PTR0:%.*]], align 1, !alias.scope !0
+; CHECK-NEXT: store i8 43, ptr [[PTR1:%.*]], align 1, !noalias !5
; CHECK-NEXT: ret void
;
call void @llvm.experimental.noalias.scope.decl(metadata !0)
- store i8 42, i8* %ptr0, !alias.scope !0
- store i8 43, i8* %ptr1, !noalias !3
+ store i8 42, ptr %ptr0, !alias.scope !0
+ store i8 43, ptr %ptr1, !noalias !3
ret void
}
-define void @test08(i8* %ptr0, i8* %ptr1) {
+define void @test08(ptr %ptr0, ptr %ptr1) {
; CHECK-LABEL: @test08(
-; CHECK-NEXT: store i8 42, i8* [[PTR0:%.*]], align 1, !alias.scope !5
-; CHECK-NEXT: store i8 43, i8* [[PTR1:%.*]], align 1, !noalias !0
+; CHECK-NEXT: store i8 42, ptr [[PTR0:%.*]], align 1, !alias.scope !5
+; CHECK-NEXT: store i8 43, ptr [[PTR1:%.*]], align 1, !noalias !0
; CHECK-NEXT: ret void
;
call void @llvm.experimental.noalias.scope.decl(metadata !0)
- store i8 42, i8* %ptr0, !alias.scope !3
- store i8 43, i8* %ptr1, !noalias !0
+ store i8 42, ptr %ptr0, !alias.scope !3
+ store i8 43, ptr %ptr1, !noalias !0
ret void
}
-define void @test11(i8* %ptr0) {
+define void @test11(ptr %ptr0) {
; CHECK-LABEL: @test11(
-; CHECK-NEXT: store i8 42, i8* [[PTR0:%.*]], align 1, !alias.scope !0
+; CHECK-NEXT: store i8 42, ptr [[PTR0:%.*]], align 1, !alias.scope !0
; CHECK-NEXT: ret void
;
call void @llvm.experimental.noalias.scope.decl(metadata !0)
- store i8 42, i8* %ptr0, !alias.scope !0
+ store i8 42, ptr %ptr0, !alias.scope !0
ret void
}
-define void @test12(i8* %ptr0) {
+define void @test12(ptr %ptr0) {
; CHECK-LABEL: @test12(
-; CHECK-NEXT: store i8 42, i8* [[PTR0:%.*]], align 1, !alias.scope !5
+; CHECK-NEXT: store i8 42, ptr [[PTR0:%.*]], align 1, !alias.scope !5
; CHECK-NEXT: ret void
;
call void @llvm.experimental.noalias.scope.decl(metadata !0)
- store i8 42, i8* %ptr0, !alias.scope !3
+ store i8 42, ptr %ptr0, !alias.scope !3
ret void
}
-define void @test13(i8* %ptr0) {
+define void @test13(ptr %ptr0) {
; CHECK-LABEL: @test13(
-; CHECK-NEXT: store i8 42, i8* [[PTR0:%.*]], align 1, !alias.scope !3
+; CHECK-NEXT: store i8 42, ptr [[PTR0:%.*]], align 1, !alias.scope !3
; CHECK-NEXT: ret void
;
call void @llvm.experimental.noalias.scope.decl(metadata !0)
- store i8 42, i8* %ptr0, !alias.scope !5
+ store i8 42, ptr %ptr0, !alias.scope !5
ret void
}
-define void @test14(i8* %ptr0) {
+define void @test14(ptr %ptr0) {
; CHECK-LABEL: @test14(
-; CHECK-NEXT: store i8 42, i8* [[PTR0:%.*]], align 1, !noalias !0
+; CHECK-NEXT: store i8 42, ptr [[PTR0:%.*]], align 1, !noalias !0
; CHECK-NEXT: ret void
;
call void @llvm.experimental.noalias.scope.decl(metadata !0)
- store i8 42, i8* %ptr0, !noalias !0
+ store i8 42, ptr %ptr0, !noalias !0
ret void
}
-define void @test15(i8* %ptr0) {
+define void @test15(ptr %ptr0) {
; CHECK-LABEL: @test15(
-; CHECK-NEXT: store i8 42, i8* [[PTR0:%.*]], align 1, !noalias !5
+; CHECK-NEXT: store i8 42, ptr [[PTR0:%.*]], align 1, !noalias !5
; CHECK-NEXT: ret void
;
call void @llvm.experimental.noalias.scope.decl(metadata !0)
- store i8 42, i8* %ptr0, !noalias !3
+ store i8 42, ptr %ptr0, !noalias !3
ret void
}
-define void @test16(i8* %ptr0) {
+define void @test16(ptr %ptr0) {
; CHECK-LABEL: @test16(
-; CHECK-NEXT: store i8 42, i8* [[PTR0:%.*]], align 1, !noalias !3
+; CHECK-NEXT: store i8 42, ptr [[PTR0:%.*]], align 1, !noalias !3
; CHECK-NEXT: ret void
;
call void @llvm.experimental.noalias.scope.decl(metadata !0)
- store i8 42, i8* %ptr0, !noalias !5
+ store i8 42, ptr %ptr0, !noalias !5
ret void
}
@as0 = external global i32
@as1 = external addrspace(1) global i32
-declare void @addrspace0(i32*)
-declare void @addrspace1(i32 addrspace(1)*)
+declare void @addrspace0(ptr)
+declare void @addrspace1(ptr addrspace(1))
-; CHECK: call void @addrspace0(i32* nonnull @as0)
-; CHECK: call void @addrspace1(i32 addrspace(1)* @as1)
+; CHECK: call void @addrspace0(ptr nonnull @as0)
+; CHECK: call void @addrspace1(ptr addrspace(1) @as1)
define void @test() {
- call void @addrspace0(i32* @as0)
- call void @addrspace1(i32 addrspace(1)* @as1)
+ call void @addrspace0(ptr @as0)
+ call void @addrspace1(ptr addrspace(1) @as1)
ret void
}
; Passing select(cond, null, v) as nonnull should be optimized to passing v
-define nonnull i32* @pr48975(i32** %.0) {
+define nonnull ptr @pr48975(ptr %.0) {
; CHECK-LABEL: @pr48975(
-; CHECK-NEXT: [[DOT1:%.*]] = load i32*, i32** [[DOT0:%.*]], align 8
-; CHECK-NEXT: [[DOT2:%.*]] = icmp eq i32* [[DOT1]], null
-; CHECK-NEXT: [[DOT3:%.*]] = bitcast i32** [[DOT0]] to i32*
-; CHECK-NEXT: [[DOT4:%.*]] = select i1 [[DOT2]], i32* null, i32* [[DOT3]]
-; CHECK-NEXT: ret i32* [[DOT4]]
+; CHECK-NEXT: [[DOT1:%.*]] = load ptr, ptr [[DOT0:%.*]], align 8
+; CHECK-NEXT: [[DOT2:%.*]] = icmp eq ptr [[DOT1]], null
+; CHECK-NEXT: [[DOT4:%.*]] = select i1 [[DOT2]], ptr null, ptr [[DOT0]]
+; CHECK-NEXT: ret ptr [[DOT4]]
;
- %.1 = load i32*, i32** %.0, align 8
- %.2 = icmp eq i32* %.1, null
- %.3 = bitcast i32** %.0 to i32*
- %.4 = select i1 %.2, i32* null, i32* %.3
- ret i32* %.4
+ %.1 = load ptr, ptr %.0, align 8
+ %.2 = icmp eq ptr %.1, null
+ %.4 = select i1 %.2, ptr null, ptr %.0
+ ret ptr %.4
}
-define nonnull i32* @nonnull_ret(i1 %cond, i32* %p) {
+define nonnull ptr @nonnull_ret(i1 %cond, ptr %p) {
; CHECK-LABEL: @nonnull_ret(
-; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND:%.*]], i32* [[P:%.*]], i32* null
-; CHECK-NEXT: ret i32* [[RES]]
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND:%.*]], ptr [[P:%.*]], ptr null
+; CHECK-NEXT: ret ptr [[RES]]
;
- %res = select i1 %cond, i32* %p, i32* null
- ret i32* %res
+ %res = select i1 %cond, ptr %p, ptr null
+ ret ptr %res
}
-define nonnull i32* @nonnull_ret2(i1 %cond, i32* %p) {
+define nonnull ptr @nonnull_ret2(i1 %cond, ptr %p) {
; CHECK-LABEL: @nonnull_ret2(
-; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND:%.*]], i32* null, i32* [[P:%.*]]
-; CHECK-NEXT: ret i32* [[RES]]
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND:%.*]], ptr null, ptr [[P:%.*]]
+; CHECK-NEXT: ret ptr [[RES]]
;
- %res = select i1 %cond, i32* null, i32* %p
- ret i32* %res
+ %res = select i1 %cond, ptr null, ptr %p
+ ret ptr %res
}
-define nonnull noundef i32* @nonnull_noundef_ret(i1 %cond, i32* %p) {
+define nonnull noundef ptr @nonnull_noundef_ret(i1 %cond, ptr %p) {
; CHECK-LABEL: @nonnull_noundef_ret(
-; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND:%.*]], i32* [[P:%.*]], i32* null
-; CHECK-NEXT: ret i32* [[RES]]
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND:%.*]], ptr [[P:%.*]], ptr null
+; CHECK-NEXT: ret ptr [[RES]]
;
- %res = select i1 %cond, i32* %p, i32* null
- ret i32* %res
+ %res = select i1 %cond, ptr %p, ptr null
+ ret ptr %res
}
-define nonnull noundef i32* @nonnull_noundef_ret2(i1 %cond, i32* %p) {
+define nonnull noundef ptr @nonnull_noundef_ret2(i1 %cond, ptr %p) {
; CHECK-LABEL: @nonnull_noundef_ret2(
-; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND:%.*]], i32* null, i32* [[P:%.*]]
-; CHECK-NEXT: ret i32* [[RES]]
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND:%.*]], ptr null, ptr [[P:%.*]]
+; CHECK-NEXT: ret ptr [[RES]]
;
- %res = select i1 %cond, i32* null, i32* %p
- ret i32* %res
+ %res = select i1 %cond, ptr null, ptr %p
+ ret ptr %res
}
-define void @nonnull_call(i1 %cond, i32* %p) {
+define void @nonnull_call(i1 %cond, ptr %p) {
; CHECK-LABEL: @nonnull_call(
-; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND:%.*]], i32* [[P:%.*]], i32* null
-; CHECK-NEXT: call void @f(i32* nonnull [[RES]])
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND:%.*]], ptr [[P:%.*]], ptr null
+; CHECK-NEXT: call void @f(ptr nonnull [[RES]])
; CHECK-NEXT: ret void
;
- %res = select i1 %cond, i32* %p, i32* null
- call void @f(i32* nonnull %res)
+ %res = select i1 %cond, ptr %p, ptr null
+ call void @f(ptr nonnull %res)
ret void
}
-define void @nonnull_call2(i1 %cond, i32* %p) {
+define void @nonnull_call2(i1 %cond, ptr %p) {
; CHECK-LABEL: @nonnull_call2(
-; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND:%.*]], i32* null, i32* [[P:%.*]]
-; CHECK-NEXT: call void @f(i32* nonnull [[RES]])
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND:%.*]], ptr null, ptr [[P:%.*]]
+; CHECK-NEXT: call void @f(ptr nonnull [[RES]])
; CHECK-NEXT: ret void
;
- %res = select i1 %cond, i32* null, i32* %p
- call void @f(i32* nonnull %res)
+ %res = select i1 %cond, ptr null, ptr %p
+ call void @f(ptr nonnull %res)
ret void
}
-define void @nonnull_noundef_call(i1 %cond, i32* %p) {
+define void @nonnull_noundef_call(i1 %cond, ptr %p) {
; CHECK-LABEL: @nonnull_noundef_call(
-; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND:%.*]], i32* [[P:%.*]], i32* null
-; CHECK-NEXT: call void @f(i32* noundef nonnull [[RES]])
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND:%.*]], ptr [[P:%.*]], ptr null
+; CHECK-NEXT: call void @f(ptr noundef nonnull [[RES]])
; CHECK-NEXT: ret void
;
- %res = select i1 %cond, i32* %p, i32* null
- call void @f(i32* nonnull noundef %res)
+ %res = select i1 %cond, ptr %p, ptr null
+ call void @f(ptr nonnull noundef %res)
ret void
}
-define void @nonnull_noundef_call2(i1 %cond, i32* %p) {
+define void @nonnull_noundef_call2(i1 %cond, ptr %p) {
; CHECK-LABEL: @nonnull_noundef_call2(
-; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND:%.*]], i32* null, i32* [[P:%.*]]
-; CHECK-NEXT: call void @f(i32* noundef nonnull [[RES]])
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND:%.*]], ptr null, ptr [[P:%.*]]
+; CHECK-NEXT: call void @f(ptr noundef nonnull [[RES]])
; CHECK-NEXT: ret void
;
- %res = select i1 %cond, i32* null, i32* %p
- call void @f(i32* nonnull noundef %res)
+ %res = select i1 %cond, ptr null, ptr %p
+ call void @f(ptr nonnull noundef %res)
ret void
}
-declare void @f(i32*)
+declare void @f(ptr)
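; Note: every CHECK block above retains the select, so the comment before
; @pr48975 describes the intended direction rather than a fold these tests
; perform; our reading is that without noundef a null result would only be
; poison, and even the noundef variants here are checked as unchanged.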
define void @pr50370(i32 %x) {
; CHECK-LABEL: @pr50370(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[X:%.*]], 1
-; CHECK-NEXT: [[B15:%.*]] = srem i32 ashr (i32 65536, i32 or (i32 zext (i1 icmp eq (i32* @g, i32* null) to i32), i32 65537)), [[XOR]]
-; CHECK-NEXT: [[B12:%.*]] = add nuw nsw i32 [[B15]], ashr (i32 65536, i32 or (i32 zext (i1 icmp eq (i32* @g, i32* null) to i32), i32 65537))
+; CHECK-NEXT: [[B15:%.*]] = srem i32 ashr (i32 65536, i32 or (i32 zext (i1 icmp eq (ptr @g, ptr null) to i32), i32 65537)), [[XOR]]
+; CHECK-NEXT: [[B12:%.*]] = add nuw nsw i32 [[B15]], ashr (i32 65536, i32 or (i32 zext (i1 icmp eq (ptr @g, ptr null) to i32), i32 65537))
; CHECK-NEXT: [[B:%.*]] = xor i32 [[B12]], -1
-; CHECK-NEXT: store i32 [[B]], i32* undef, align 4
+; CHECK-NEXT: store i32 [[B]], ptr undef, align 4
; CHECK-NEXT: ret void
;
entry:
%xor = xor i32 %x, 1
- %or4 = or i32 or (i32 zext (i1 icmp eq (i32* @g, i32* null) to i32), i32 1), 65536
+ %or4 = or i32 or (i32 zext (i1 icmp eq (ptr @g, ptr null) to i32), i32 1), 65536
%B6 = ashr i32 65536, %or4
%B15 = srem i32 %B6, %xor
%B20 = sdiv i32 %or4, 2147483647
%B2 = xor i32 %B12, %B8
%B3 = or i32 %B12, undef
%B = xor i32 %B2, %B3
- store i32 %B, i32* undef, align 4
+ store i32 %B, ptr undef, align 4
ret void
}
ret i32 %r
}
-define i32 @not_sub_extra_use(i32 %y, i32* %p) {
+define i32 @not_sub_extra_use(i32 %y, ptr %p) {
; CHECK-LABEL: @not_sub_extra_use(
; CHECK-NEXT: [[S:%.*]] = sub i32 123, [[Y:%.*]]
-; CHECK-NEXT: store i32 [[S]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[S]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = add i32 [[Y]], -124
; CHECK-NEXT: ret i32 [[R]]
;
%s = sub i32 123, %y
- store i32 %s, i32* %p
+ store i32 %s, ptr %p
%r = xor i32 %s, -1
ret i32 %r
}
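; The arithmetic behind the fold (a worked identity, not from the file): in
; two's complement ~a == -a - 1, so ~(123 - y) == y - 123 - 1 == y + (-124),
; matching the 'add i32 %y, -124' above while the store keeps the original
; sub alive as its own instruction.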
ret <2 x i32> %r
}
-define <2 x i32> @not_sub_extra_use_splat(<2 x i32> %y, <2 x i32>* %p) {
+define <2 x i32> @not_sub_extra_use_splat(<2 x i32> %y, ptr %p) {
; CHECK-LABEL: @not_sub_extra_use_splat(
; CHECK-NEXT: [[S:%.*]] = sub <2 x i32> <i32 123, i32 123>, [[Y:%.*]]
-; CHECK-NEXT: store <2 x i32> [[S]], <2 x i32>* [[P:%.*]], align 8
+; CHECK-NEXT: store <2 x i32> [[S]], ptr [[P:%.*]], align 8
; CHECK-NEXT: [[R:%.*]] = add <2 x i32> [[Y]], <i32 -124, i32 -124>
; CHECK-NEXT: ret <2 x i32> [[R]]
;
%s = sub <2 x i32> <i32 123, i32 123>, %y
- store <2 x i32> %s, <2 x i32>* %p
+ store <2 x i32> %s, ptr %p
%r = xor <2 x i32> %s, <i32 -1, i32 -1>
ret <2 x i32> %r
}
ret <2 x i32> %r
}
-define <2 x i32> @not_sub_extra_use_vec(<2 x i32> %y, <2 x i32>* %p) {
+define <2 x i32> @not_sub_extra_use_vec(<2 x i32> %y, ptr %p) {
; CHECK-LABEL: @not_sub_extra_use_vec(
; CHECK-NEXT: [[S:%.*]] = sub <2 x i32> <i32 123, i32 42>, [[Y:%.*]]
-; CHECK-NEXT: store <2 x i32> [[S]], <2 x i32>* [[P:%.*]], align 8
+; CHECK-NEXT: store <2 x i32> [[S]], ptr [[P:%.*]], align 8
; CHECK-NEXT: [[R:%.*]] = add <2 x i32> [[Y]], <i32 -124, i32 -43>
; CHECK-NEXT: ret <2 x i32> [[R]]
;
%s = sub <2 x i32> <i32 123, i32 42>, %y
- store <2 x i32> %s, <2 x i32>* %p
+ store <2 x i32> %s, ptr %p
%r = xor <2 x i32> %s, <i32 -1, i32 -1>
ret <2 x i32> %r
}
ret i1 %B
}
-define i64 @shl1(i64 %X, i64* %P) {
+define i64 @shl1(i64 %X, ptr %P) {
; CHECK-LABEL: @shl1(
; CHECK-NEXT: [[A:%.*]] = and i64 [[X:%.*]], 312
-; CHECK-NEXT: store i64 [[A]], i64* [[P:%.*]], align 4
+; CHECK-NEXT: store i64 [[A]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[B:%.*]] = shl nuw nsw i64 [[A]], 8
; CHECK-NEXT: ret i64 [[B]]
;
%A = and i64 %X, 312
- store i64 %A, i64* %P ; multiple uses of A.
+ store i64 %A, ptr %P ; multiple uses of A.
%B = shl i64 %A, 8
ret i64 %B
}
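; The added flags follow from a small range argument: %A = %X & 312 lies in
; [0, 312], so %A << 8 is at most 312 * 256 = 79872, comfortably inside both
; the unsigned and signed i64 ranges, hence both nuw and nsw hold.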
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
-define void @test(<4 x float> *%in_ptr, <4 x float> *%out_ptr) {
- %A = load <4 x float>, <4 x float>* %in_ptr, align 16
+define void @test(ptr %in_ptr, ptr %out_ptr) {
+ %A = load <4 x float>, ptr %in_ptr, align 16
%B = shufflevector <4 x float> %A, <4 x float> poison, <4 x i32> <i32 0, i32 0, i32 undef, i32 undef>
%C = shufflevector <4 x float> %B, <4 x float> %A, <4 x i32> <i32 0, i32 1, i32 4, i32 undef>
%D = shufflevector <4 x float> %C, <4 x float> %A, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
; CHECK: %D = shufflevector <4 x float> %A, <4 x float> poison, <4 x i32> zeroinitializer
- store <4 x float> %D, <4 x float> *%out_ptr
+ store <4 x float> %D, ptr %out_ptr
ret void
}
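; Composing the three shuffle masks by hand shows why one splat suffices
; (a sketch of the reasoning, not additional test input):
;   %B = <A0, A0, undef, undef>
;   %C = <B0, B1, A0, undef> = <A0, A0, A0, undef>
;   %D = <C0, C1, C2, A0>    = <A0, A0, A0, A0>
; i.e. %D is a splat of element 0 of %A, hence the zeroinitializer mask in
; the CHECK line.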
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
-define void @test(<4 x float> *%in_ptr, <4 x float> *%out_ptr) {
- %A = load <4 x float>, <4 x float>* %in_ptr, align 16
+define void @test(ptr %in_ptr, ptr %out_ptr) {
+ %A = load <4 x float>, ptr %in_ptr, align 16
%B = shufflevector <4 x float> %A, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 undef, i32 undef>
%C = shufflevector <4 x float> %B, <4 x float> %A, <4 x i32> <i32 0, i32 1, i32 4, i32 undef>
%D = shufflevector <4 x float> %C, <4 x float> %A, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
; CHECK: %D = shufflevector <4 x float> %A, <4 x float> poison, <4 x i32> zeroinitializer
- store <4 x float> %D, <4 x float> *%out_ptr
+ store <4 x float> %D, ptr %out_ptr
ret void
}
%opaque = type opaque
; CHECK: call i64 @llvm.objectsize.i64
-define void @foo(%opaque* sret(%opaque) %in, i64* %sizeptr) {
- %ptr = bitcast %opaque* %in to i8*
- %size = call i64 @llvm.objectsize.i64(i8* %ptr, i1 0, i1 0, i1 0)
- store i64 %size, i64* %sizeptr
+define void @foo(ptr sret(%opaque) %in, ptr %sizeptr) {
+ %size = call i64 @llvm.objectsize.i64(ptr %in, i1 0, i1 0, i1 0)
+ store i64 %size, ptr %sizeptr
ret void
}
-declare i64 @llvm.objectsize.i64(i8*, i1, i1, i1)
+declare i64 @llvm.objectsize.i64(ptr, i1, i1, i1)
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-declare noalias i8* @malloc(i64) nounwind allockind("alloc,uninitialized") allocsize(0)
-declare noalias nonnull i8* @_Znwm(i64) ; new(unsigned long)
+declare noalias ptr @malloc(i64) nounwind allockind("alloc,uninitialized") allocsize(0)
+declare noalias nonnull ptr @_Znwm(i64) ; new(unsigned long)
declare i32 @__gxx_personality_v0(...)
-declare void @__cxa_call_unexpected(i8*)
-declare i64 @llvm.objectsize.i64(i8*, i1) nounwind readonly
+declare void @__cxa_call_unexpected(ptr)
+declare i64 @llvm.objectsize.i64(ptr, i1) nounwind readonly
-define i64 @f1(i8 **%esc) {
+define i64 @f1(ptr %esc) {
; CHECK-LABEL: @f1(
-; CHECK-NEXT: [[CALL:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
-; CHECK-NEXT: store i8* [[CALL]], i8** [[ESC:%.*]], align 8
+; CHECK-NEXT: [[CALL:%.*]] = call dereferenceable_or_null(4) ptr @malloc(i64 4)
+; CHECK-NEXT: store ptr [[CALL]], ptr [[ESC:%.*]], align 8
; CHECK-NEXT: ret i64 4
;
- %call = call i8* @malloc(i64 4)
- store i8* %call, i8** %esc
- %size = call i64 @llvm.objectsize.i64(i8* %call, i1 false)
+ %call = call ptr @malloc(i64 4)
+ store ptr %call, ptr %esc
+ %size = call i64 @llvm.objectsize.i64(ptr %call, i1 false)
ret i64 %size
}
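; Worked example of the fold above: @malloc is declared with allocsize(0),
; so the object size of the malloc(4) result is the constant 4; instcombine
; both returns that constant and annotates the call with
; dereferenceable_or_null(4), as the CHECK lines require.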
-define i64 @f2(i8** %esc) nounwind uwtable ssp personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i64 @f2(ptr %esc) nounwind uwtable ssp personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: @f2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = invoke noalias dereferenceable(13) i8* @_Znwm(i64 13)
+; CHECK-NEXT: [[CALL:%.*]] = invoke noalias dereferenceable(13) ptr @_Znwm(i64 13)
; CHECK-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
; CHECK: invoke.cont:
-; CHECK-NEXT: store i8* [[CALL]], i8** [[ESC:%.*]], align 8
+; CHECK-NEXT: store ptr [[CALL]], ptr [[ESC:%.*]], align 8
; CHECK-NEXT: ret i64 13
; CHECK: lpad:
-; CHECK-NEXT: [[TMP0:%.*]] = landingpad { i8*, i32 }
-; CHECK-NEXT: filter [0 x i8*] zeroinitializer
-; CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i8*, i32 } [[TMP0]], 0
-; CHECK-NEXT: tail call void @__cxa_call_unexpected(i8* [[TMP1]]) #[[ATTR3:[0-9]+]]
+; CHECK-NEXT: [[TMP0:%.*]] = landingpad { ptr, i32 }
+; CHECK-NEXT: filter [0 x ptr] zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = extractvalue { ptr, i32 } [[TMP0]], 0
+; CHECK-NEXT: tail call void @__cxa_call_unexpected(ptr [[TMP1]]) #[[ATTR3:[0-9]+]]
; CHECK-NEXT: unreachable
;
entry:
- %call = invoke noalias i8* @_Znwm(i64 13)
+ %call = invoke noalias ptr @_Znwm(i64 13)
to label %invoke.cont unwind label %lpad
invoke.cont:
- store i8* %call, i8** %esc
- %0 = tail call i64 @llvm.objectsize.i64(i8* %call, i1 false)
+ store ptr %call, ptr %esc
+ %0 = tail call i64 @llvm.objectsize.i64(ptr %call, i1 false)
ret i64 %0
lpad:
- %1 = landingpad { i8*, i32 }
- filter [0 x i8*] zeroinitializer
- %2 = extractvalue { i8*, i32 } %1, 0
- tail call void @__cxa_call_unexpected(i8* %2) noreturn nounwind
+ %1 = landingpad { ptr, i32 }
+ filter [0 x ptr] zeroinitializer
+ %2 = extractvalue { ptr, i32 } %1, 0
+ tail call void @__cxa_call_unexpected(ptr %2) noreturn nounwind
unreachable
}
; RUN: opt -S -passes=instcombine -o - %s | FileCheck %s
target datalayout = "e-p:32:32:32-p1:64:64:64-p2:8:8:8-p3:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32"
-declare i32 @llvm.objectsize.i32.p0i8(i8*, i1) nounwind readonly
-declare i32 @llvm.objectsize.i32.p1i8(i8 addrspace(1)*, i1) nounwind readonly
-declare i32 @llvm.objectsize.i32.p2i8(i8 addrspace(2)*, i1) nounwind readonly
-declare i32 @llvm.objectsize.i32.p3i8(i8 addrspace(3)*, i1) nounwind readonly
-declare i16 @llvm.objectsize.i16.p3i8(i8 addrspace(3)*, i1) nounwind readonly
+declare i32 @llvm.objectsize.i32.p0(ptr, i1) nounwind readonly
+declare i32 @llvm.objectsize.i32.p1(ptr addrspace(1), i1) nounwind readonly
+declare i32 @llvm.objectsize.i32.p2(ptr addrspace(2), i1) nounwind readonly
+declare i32 @llvm.objectsize.i32.p3(ptr addrspace(3), i1) nounwind readonly
+declare i16 @llvm.objectsize.i16.p3(ptr addrspace(3), i1) nounwind readonly
@array_as2 = private addrspace(2) global [60 x i8] zeroinitializer, align 4
-@array_as1_pointers = private global [10 x i32 addrspace(1)*] zeroinitializer, align 4
-@array_as2_pointers = private global [24 x i32 addrspace(2)*] zeroinitializer, align 4
-@array_as3_pointers = private global [42 x i32 addrspace(3)*] zeroinitializer, align 4
+@array_as1_pointers = private global [10 x ptr addrspace(1)] zeroinitializer, align 4
+@array_as2_pointers = private global [24 x ptr addrspace(2)] zeroinitializer, align 4
+@array_as3_pointers = private global [42 x ptr addrspace(3)] zeroinitializer, align 4
-@array_as2_as1_pointer_pointers = private global [16 x i32 addrspace(2)* addrspace(1)*] zeroinitializer, align 4
+@array_as2_as1_pointer_pointers = private global [16 x ptr addrspace(1)] zeroinitializer, align 4
@a_as3 = private addrspace(3) global [60 x i8] zeroinitializer, align 1
define i32 @foo_as3() nounwind {
; CHECK-LABEL: @foo_as3(
; CHECK-NEXT: ret i32 60
- %1 = call i32 @llvm.objectsize.i32.p3i8(i8 addrspace(3)* getelementptr inbounds ([60 x i8], [60 x i8] addrspace(3)* @a_as3, i32 0, i32 0), i1 false)
+ %1 = call i32 @llvm.objectsize.i32.p3(ptr addrspace(3) @a_as3, i1 false)
ret i32 %1
}
define i16 @foo_as3_i16() nounwind {
; CHECK-LABEL: @foo_as3_i16(
; CHECK-NEXT: ret i16 60
- %1 = call i16 @llvm.objectsize.i16.p3i8(i8 addrspace(3)* getelementptr inbounds ([60 x i8], [60 x i8] addrspace(3)* @a_as3, i32 0, i32 0), i1 false)
+ %1 = call i16 @llvm.objectsize.i16.p3(ptr addrspace(3) @a_as3, i1 false)
ret i16 %1
}
-@a_alias = weak alias [60 x i8], [60 x i8] addrspace(3)* @a_as3
+@a_alias = weak alias [60 x i8], ptr addrspace(3) @a_as3
define i32 @foo_alias() nounwind {
- %1 = call i32 @llvm.objectsize.i32.p3i8(i8 addrspace(3)* getelementptr inbounds ([60 x i8], [60 x i8] addrspace(3)* @a_alias, i32 0, i32 0), i1 false)
+ %1 = call i32 @llvm.objectsize.i32.p3(ptr addrspace(3) @a_alias, i1 false)
ret i32 %1
}
define i32 @array_as2_size() {
; CHECK-LABEL: @array_as2_size(
; CHECK-NEXT: ret i32 60
- %bc = bitcast [60 x i8] addrspace(2)* @array_as2 to i8 addrspace(2)*
- %1 = call i32 @llvm.objectsize.i32.p2i8(i8 addrspace(2)* %bc, i1 false)
+ %1 = call i32 @llvm.objectsize.i32.p2(ptr addrspace(2) @array_as2, i1 false)
ret i32 %1
}
define i32 @pointer_array_as1() {
; CHECK-LABEL: @pointer_array_as1(
; CHECK-NEXT: ret i32 80
- %bc = addrspacecast [10 x i32 addrspace(1)*]* @array_as1_pointers to i8 addrspace(1)*
- %1 = call i32 @llvm.objectsize.i32.p1i8(i8 addrspace(1)* %bc, i1 false)
+ %bc = addrspacecast ptr @array_as1_pointers to ptr addrspace(1)
+ %1 = call i32 @llvm.objectsize.i32.p1(ptr addrspace(1) %bc, i1 false)
ret i32 %1
}
define i32 @pointer_array_as2() {
; CHECK-LABEL: @pointer_array_as2(
; CHECK-NEXT: ret i32 24
- %bc = bitcast [24 x i32 addrspace(2)*]* @array_as2_pointers to i8*
- %1 = call i32 @llvm.objectsize.i32.p0i8(i8* %bc, i1 false)
+ %1 = call i32 @llvm.objectsize.i32.p0(ptr @array_as2_pointers, i1 false)
ret i32 %1
}
define i32 @pointer_array_as3() {
; CHECK-LABEL: @pointer_array_as3(
; CHECK-NEXT: ret i32 84
- %bc = bitcast [42 x i32 addrspace(3)*]* @array_as3_pointers to i8*
- %1 = call i32 @llvm.objectsize.i32.p0i8(i8* %bc, i1 false)
+ %1 = call i32 @llvm.objectsize.i32.p0(ptr @array_as3_pointers, i1 false)
ret i32 %1
}
define i32 @pointer_pointer_array_as2_as1() {
; CHECK-LABEL: @pointer_pointer_array_as2_as1(
; CHECK-NEXT: ret i32 128
- %bc = bitcast [16 x i32 addrspace(2)* addrspace(1)*]* @array_as2_as1_pointer_pointers to i8*
- %1 = call i32 @llvm.objectsize.i32.p0i8(i8* %bc, i1 false)
+ %1 = call i32 @llvm.objectsize.i32.p0(ptr @array_as2_as1_pointer_pointers, i1 false)
ret i32 %1
}
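; The constant results above follow from the datalayout
; "e-p:32:32:32-p1:64:64:64-p2:8:8:8-p3:16:16:16-...": pointers are 8 bytes
; in addrspace(1), 1 byte in addrspace(2) and 2 bytes in addrspace(3), so
; (worked arithmetic, not extra test input):
;   [10 x ptr addrspace(1)] -> 10 * 8 = 80
;   [24 x ptr addrspace(2)] -> 24 * 1 = 24
;   [42 x ptr addrspace(3)] -> 42 * 2 = 84
;   [16 x ptr addrspace(1)] -> 16 * 8 = 128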
@g4 = internal constant i32 4
define i32 @test() {
- %A = load i32, i32* @g1
- %B = load i32, i32* @g2
- %C = load i32, i32* @g3
- %D = load i32, i32* @g4
+ %A = load i32, ptr @g1
+ %B = load i32, ptr @g2
+ %C = load i32, ptr @g3
+ %D = load i32, ptr @g4
%a = add i32 %A, %B
%b = add i32 %a, %C
; Extra use
; Expect to fold
-define i1 @foo1_and_extra_use_shl(i32 %k, i32 %c1, i32 %c2, i32* %p) {
+define i1 @foo1_and_extra_use_shl(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_shl(
; CHECK-NEXT: [[T0:%.*]] = shl i32 1, [[C1:%.*]]
-; CHECK-NEXT: store i32 [[T0]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[T0]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[T1:%.*]] = shl i32 1, [[C2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K:%.*]]
; CHECK-NEXT: ret i1 [[TMP3]]
;
%t0 = shl i32 1, %c1
- store i32 %t0, i32* %p ; extra use of shl
+ store i32 %t0, ptr %p ; extra use of shl
%t1 = shl i32 1, %c2
%t2 = and i32 %t0, %k
%t3 = icmp eq i32 %t2, 0
ret i1 %or
}
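; The fold exercised throughout this group rewrites
;   ((k & (1 << c1)) == 0) | ((k & (1 << c2)) == 0)
; into a single mask test (a sketch of the expected shape, assuming the
; usual one-hot merge):
;   %m  = or i32 %t0, %t1      ; (1 << c1) | (1 << c2)
;   %km = and i32 %m, %k
;   %r  = icmp ne i32 %km, %m  ; true unless k has both bits set
; An extra use of a shl does not block this, since the shl values remain
; available after the fold.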
-define i1 @foo1_and_extra_use_shl_logical(i32 %k, i32 %c1, i32 %c2, i32* %p) {
+define i1 @foo1_and_extra_use_shl_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_shl_logical(
; CHECK-NEXT: [[T0:%.*]] = shl i32 1, [[C1:%.*]]
-; CHECK-NEXT: store i32 [[T0]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[T0]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[T1:%.*]] = shl i32 1, [[C2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[T1]]
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]]
; CHECK-NEXT: ret i1 [[TMP4]]
;
%t0 = shl i32 1, %c1
- store i32 %t0, i32* %p ; extra use of shl
+ store i32 %t0, ptr %p ; extra use of shl
%t1 = shl i32 1, %c2
%t2 = and i32 %t0, %k
%t3 = icmp eq i32 %t2, 0
}
; Should not fold
-define i1 @foo1_and_extra_use_and(i32 %k, i32 %c1, i32 %c2, i32* %p) {
+define i1 @foo1_and_extra_use_and(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_and(
; CHECK-NEXT: [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT: [[T1:%.*]] = shl i32 1, [[C2:%.*]]
; CHECK-NEXT: [[T2:%.*]] = and i32 [[T0]], [[K:%.*]]
-; CHECK-NEXT: store i32 [[T2]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[T2]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
%t0 = shl i32 1, %c1
%t1 = shl i32 1, %c2
%t2 = and i32 %t0, %k
- store i32 %t2, i32* %p ; extra use of and
+ store i32 %t2, ptr %p ; extra use of and
%t3 = icmp eq i32 %t2, 0
%t4 = and i32 %t1, %k
%t5 = icmp eq i32 %t4, 0
ret i1 %or
}
-define i1 @foo1_and_extra_use_and_logical(i32 %k, i32 %c1, i32 %c2, i32* %p) {
+define i1 @foo1_and_extra_use_and_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_and_logical(
; CHECK-NEXT: [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT: [[T1:%.*]] = shl i32 1, [[C2:%.*]]
; CHECK-NEXT: [[T2:%.*]] = and i32 [[T0]], [[K:%.*]]
-; CHECK-NEXT: store i32 [[T2]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[T2]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[T1]]
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K]]
%t0 = shl i32 1, %c1
%t1 = shl i32 1, %c2
%t2 = and i32 %t0, %k
- store i32 %t2, i32* %p ; extra use of and
+ store i32 %t2, ptr %p ; extra use of and
%t3 = icmp eq i32 %t2, 0
%t4 = and i32 %t1, %k
%t5 = icmp eq i32 %t4, 0
}
; Should not fold
-define i1 @foo1_and_extra_use_cmp(i32 %k, i32 %c1, i32 %c2, i1* %p) {
+define i1 @foo1_and_extra_use_cmp(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_cmp(
; CHECK-NEXT: [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT: [[T1:%.*]] = shl i32 1, [[C2:%.*]]
; CHECK-NEXT: [[T2:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT: [[T3:%.*]] = icmp eq i32 [[T2]], 0
-; CHECK-NEXT: store i1 [[T3]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[T3]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
%t1 = shl i32 1, %c2
%t2 = and i32 %t0, %k
%t3 = icmp eq i32 %t2, 0
- store i1 %t3, i1* %p ; extra use of cmp
+ store i1 %t3, ptr %p ; extra use of cmp
%t4 = and i32 %t1, %k
%t5 = icmp eq i32 %t4, 0
%or = or i1 %t3, %t5
ret i1 %or
}
-define i1 @foo1_and_extra_use_cmp_logical(i32 %k, i32 %c1, i32 %c2, i1* %p) {
+define i1 @foo1_and_extra_use_cmp_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_cmp_logical(
; CHECK-NEXT: [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT: [[T1:%.*]] = shl i32 1, [[C2:%.*]]
; CHECK-NEXT: [[T2:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT: [[T3:%.*]] = icmp eq i32 [[T2]], 0
-; CHECK-NEXT: store i1 [[T3]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[T3]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[T1]]
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K]]
%t1 = shl i32 1, %c2
%t2 = and i32 %t0, %k
%t3 = icmp eq i32 %t2, 0
- store i1 %t3, i1* %p ; extra use of cmp
+ store i1 %t3, ptr %p ; extra use of cmp
%t4 = and i32 %t1, %k
%t5 = icmp eq i32 %t4, 0
%or = select i1 %t3, i1 true, i1 %t5
}
; Expect to fold
-define i1 @foo1_and_extra_use_shl2(i32 %k, i32 %c1, i32 %c2, i32* %p) {
+define i1 @foo1_and_extra_use_shl2(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_shl2(
; CHECK-NEXT: [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT: [[T1:%.*]] = shl i32 1, [[C2:%.*]]
-; CHECK-NEXT: store i32 [[T1]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[T1]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K:%.*]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
;
%t0 = shl i32 1, %c1
%t1 = shl i32 1, %c2
- store i32 %t1, i32* %p ; extra use of shl
+ store i32 %t1, ptr %p ; extra use of shl
%t2 = and i32 %t0, %k
%t3 = icmp eq i32 %t2, 0
%t4 = and i32 %t1, %k
ret i1 %or
}
-define i1 @foo1_and_extra_use_shl2_logical(i32 %k, i32 %c1, i32 %c2, i32* %p) {
+define i1 @foo1_and_extra_use_shl2_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_shl2_logical(
; CHECK-NEXT: [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT: [[T1:%.*]] = shl i32 1, [[C2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[T1]]
-; CHECK-NEXT: store i32 [[TMP1]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[TMP1]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]]
;
%t0 = shl i32 1, %c1
%t1 = shl i32 1, %c2
- store i32 %t1, i32* %p ; extra use of shl
+ store i32 %t1, ptr %p ; extra use of shl
%t2 = and i32 %t0, %k
%t3 = icmp eq i32 %t2, 0
%t4 = and i32 %t1, %k
}
; Should not fold
-define i1 @foo1_and_extra_use_and2(i32 %k, i32 %c1, i32 %c2, i32* %p) {
+define i1 @foo1_and_extra_use_and2(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_and2(
; CHECK-NEXT: [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT: [[T1:%.*]] = shl i32 1, [[C2:%.*]]
; CHECK-NEXT: [[T4:%.*]] = and i32 [[T1]], [[K:%.*]]
-; CHECK-NEXT: store i32 [[T4]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[T4]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
%t2 = and i32 %t0, %k
%t3 = icmp eq i32 %t2, 0
%t4 = and i32 %t1, %k
- store i32 %t4, i32* %p ; extra use of and
+ store i32 %t4, ptr %p ; extra use of and
%t5 = icmp eq i32 %t4, 0
%or = or i1 %t3, %t5
ret i1 %or
}
-define i1 @foo1_and_extra_use_and2_logical(i32 %k, i32 %c1, i32 %c2, i32* %p) {
+define i1 @foo1_and_extra_use_and2_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_and2_logical(
; CHECK-NEXT: [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT: [[T1:%.*]] = shl i32 1, [[C2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[T1]]
; CHECK-NEXT: [[T4:%.*]] = and i32 [[TMP1]], [[K:%.*]]
-; CHECK-NEXT: store i32 [[T4]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[T4]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]]
%t2 = and i32 %t0, %k
%t3 = icmp eq i32 %t2, 0
%t4 = and i32 %t1, %k
- store i32 %t4, i32* %p ; extra use of and
+ store i32 %t4, ptr %p ; extra use of and
%t5 = icmp eq i32 %t4, 0
%or = select i1 %t3, i1 true, i1 %t5
ret i1 %or
}
; Should not fold
-define i1 @foo1_and_extra_use_cmp2(i32 %k, i32 %c1, i32 %c2, i1* %p) {
+define i1 @foo1_and_extra_use_cmp2(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_cmp2(
; CHECK-NEXT: [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT: [[T1:%.*]] = shl i32 1, [[C2:%.*]]
; CHECK-NEXT: [[T4:%.*]] = and i32 [[T1]], [[K:%.*]]
; CHECK-NEXT: [[T5:%.*]] = icmp eq i32 [[T4]], 0
-; CHECK-NEXT: store i1 [[T5]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[T5]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
%t3 = icmp eq i32 %t2, 0
%t4 = and i32 %t1, %k
%t5 = icmp eq i32 %t4, 0
- store i1 %t5, i1* %p ; extra use of cmp
+ store i1 %t5, ptr %p ; extra use of cmp
%or = or i1 %t3, %t5
ret i1 %or
}
-define i1 @foo1_and_extra_use_cmp2_logical(i32 %k, i32 %c1, i32 %c2, i1* %p) {
+define i1 @foo1_and_extra_use_cmp2_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_extra_use_cmp2_logical(
; CHECK-NEXT: [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT: [[T1:%.*]] = shl i32 1, [[C2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[T1]]
; CHECK-NEXT: [[T4:%.*]] = and i32 [[TMP1]], [[K:%.*]]
; CHECK-NEXT: [[T5:%.*]] = icmp eq i32 [[T4]], 0
-; CHECK-NEXT: store i1 [[T5]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[T5]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]]
%t3 = icmp eq i32 %t2, 0
%t4 = and i32 %t1, %k
%t5 = icmp eq i32 %t4, 0
- store i1 %t5, i1* %p ; extra use of cmp
+ store i1 %t5, ptr %p ; extra use of cmp
%or = select i1 %t3, i1 true, i1 %t5
ret i1 %or
}
; Shift-of-signbit replaced with 'icmp s*'
; Expect to fold
-define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl1(i32 %k, i32 %c1, i32 %c2, i32* %p) {
+define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl1(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl1(
; CHECK-NEXT: [[T0:%.*]] = shl i32 1, [[C1:%.*]]
-; CHECK-NEXT: store i32 [[T0]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[T0]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT: [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT: ret i1 [[OR]]
;
%t0 = shl i32 1, %c1
- store i32 %t0, i32* %p ; extra use of shl
+ store i32 %t0, ptr %p ; extra use of shl
%t1 = and i32 %t0, %k
%t2 = icmp eq i32 %t1, 0
%t3 = shl i32 %k, %c2
ret i1 %or
}
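; In the signbit variants, `icmp sgt (shl %k, %c2), -1` tests that the sign
; bit of (%k << %c2) is clear, i.e. that bit (31 - %c2) of %k is zero; that
; is the "shift-of-signbit replaced with 'icmp s*'" form named above.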
-define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl1_logical(i32 %k, i32 %c1, i32 %c2, i32* %p) {
+define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl1_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl1_logical(
; CHECK-NEXT: [[T0:%.*]] = shl i32 1, [[C1:%.*]]
-; CHECK-NEXT: store i32 [[T0]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[T0]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT: [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT: ret i1 [[OR]]
;
%t0 = shl i32 1, %c1
- store i32 %t0, i32* %p ; extra use of shl
+ store i32 %t0, ptr %p ; extra use of shl
%t1 = and i32 %t0, %k
%t2 = icmp eq i32 %t1, 0
%t3 = shl i32 %k, %c2
}
; Should not fold
-define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_and(i32 %k, i32 %c1, i32 %c2, i32* %p) {
+define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_and(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_and(
; CHECK-NEXT: [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT: [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
-; CHECK-NEXT: store i32 [[T1]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[T1]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT: [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT: [[T4:%.*]] = icmp sgt i32 [[T3]], -1
;
%t0 = shl i32 1, %c1
%t1 = and i32 %t0, %k
- store i32 %t1, i32* %p ; extra use of and
+ store i32 %t1, ptr %p ; extra use of and
%t2 = icmp eq i32 %t1, 0
%t3 = shl i32 %k, %c2
%t4 = icmp sgt i32 %t3, -1
ret i1 %or
}
-define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_and_logical(i32 %k, i32 %c1, i32 %c2, i32* %p) {
+define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_and_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_and_logical(
; CHECK-NEXT: [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT: [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
-; CHECK-NEXT: store i32 [[T1]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[T1]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT: [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT: [[T4:%.*]] = icmp sgt i32 [[T3]], -1
;
%t0 = shl i32 1, %c1
%t1 = and i32 %t0, %k
- store i32 %t1, i32* %p ; extra use of and
+ store i32 %t1, ptr %p ; extra use of and
%t2 = icmp eq i32 %t1, 0
%t3 = shl i32 %k, %c2
%t4 = icmp sgt i32 %t3, -1
}
; Should not fold
-define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp1(i32 %k, i32 %c1, i32 %c2, i1* %p) {
+define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp1(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp1(
; CHECK-NEXT: [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT: [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[T1]], 0
-; CHECK-NEXT: store i1 [[T2]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[T2]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT: [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT: [[OR:%.*]] = or i1 [[T2]], [[T4]]
%t0 = shl i32 1, %c1
%t1 = and i32 %t0, %k
%t2 = icmp eq i32 %t1, 0
- store i1 %t2, i1* %p ; extra use of cmp
+ store i1 %t2, ptr %p ; extra use of cmp
%t3 = shl i32 %k, %c2
%t4 = icmp sgt i32 %t3, -1
%or = or i1 %t2, %t4
ret i1 %or
}
-define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp1_logical(i32 %k, i32 %c1, i32 %c2, i1* %p) {
+define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp1_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp1_logical(
; CHECK-NEXT: [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT: [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[T1]], 0
-; CHECK-NEXT: store i1 [[T2]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[T2]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT: [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT: [[OR:%.*]] = select i1 [[T2]], i1 true, i1 [[T4]]
%t0 = shl i32 1, %c1
%t1 = and i32 %t0, %k
%t2 = icmp eq i32 %t1, 0
- store i1 %t2, i1* %p ; extra use of cmp
+ store i1 %t2, ptr %p ; extra use of cmp
%t3 = shl i32 %k, %c2
%t4 = icmp sgt i32 %t3, -1
%or = select i1 %t2, i1 true, i1 %t4
}
; Should not fold
-define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl2(i32 %k, i32 %c1, i32 %c2, i32* %p) {
+define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl2(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl2(
; CHECK-NEXT: [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT: [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT: [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
-; CHECK-NEXT: store i32 [[T3]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[T3]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT: [[OR:%.*]] = or i1 [[T2]], [[T4]]
; CHECK-NEXT: ret i1 [[OR]]
%t1 = and i32 %t0, %k
%t2 = icmp eq i32 %t1, 0
%t3 = shl i32 %k, %c2
- store i32 %t3, i32* %p ; extra use of shl
+ store i32 %t3, ptr %p ; extra use of shl
%t4 = icmp sgt i32 %t3, -1
%or = or i1 %t2, %t4
ret i1 %or
}
-define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl2_logical(i32 %k, i32 %c1, i32 %c2, i32* %p) {
+define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl2_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl2_logical(
; CHECK-NEXT: [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT: [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT: [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
-; CHECK-NEXT: store i32 [[T3]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[T3]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT: [[OR:%.*]] = select i1 [[T2]], i1 true, i1 [[T4]]
; CHECK-NEXT: ret i1 [[OR]]
%t1 = and i32 %t0, %k
%t2 = icmp eq i32 %t1, 0
%t3 = shl i32 %k, %c2
- store i32 %t3, i32* %p ; extra use of shl
+ store i32 %t3, ptr %p ; extra use of shl
%t4 = icmp sgt i32 %t3, -1
%or = select i1 %t2, i1 true, i1 %t4
ret i1 %or
}
; Should not fold
-define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp2(i32 %k, i32 %c1, i32 %c2, i1* %p) {
+define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp2(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp2(
; CHECK-NEXT: [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT: [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT: [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT: [[T4:%.*]] = icmp sgt i32 [[T3]], -1
-; CHECK-NEXT: store i1 [[T4]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[T4]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[OR:%.*]] = or i1 [[T2]], [[T4]]
; CHECK-NEXT: ret i1 [[OR]]
;
%t2 = icmp eq i32 %t1, 0
%t3 = shl i32 %k, %c2
%t4 = icmp sgt i32 %t3, -1
- store i1 %t4, i1* %p ; extra use of cmp
+ store i1 %t4, ptr %p ; extra use of cmp
%or = or i1 %t2, %t4
ret i1 %or
}
-define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp2_logical(i32 %k, i32 %c1, i32 %c2, i1* %p) {
+define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp2_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp2_logical(
; CHECK-NEXT: [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT: [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT: [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT: [[T4:%.*]] = icmp sgt i32 [[T3]], -1
-; CHECK-NEXT: store i1 [[T4]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[T4]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[OR:%.*]] = select i1 [[T2]], i1 true, i1 [[T4]]
; CHECK-NEXT: ret i1 [[OR]]
;
%t2 = icmp eq i32 %t1, 0
%t3 = shl i32 %k, %c2
%t4 = icmp sgt i32 %t3, -1
- store i1 %t4, i1* %p ; extra use of cmp
+ store i1 %t4, ptr %p ; extra use of cmp
%or = select i1 %t2, i1 true, i1 %t4
ret i1 %or
}
; CHECK-LABEL: @bitcast_opaque_to_opaque(
; CHECK-NEXT: ret ptr [[A:%.*]]
;
- %b = bitcast ptr %a to ptr
- ret ptr %b
+ ret ptr %a
}
-define ptr @bitcast_typed_to_opaque(i8* %a) {
+define ptr @bitcast_typed_to_opaque(ptr %a) {
; CHECK-LABEL: @bitcast_typed_to_opaque(
; CHECK-NEXT: ret ptr [[A:%.*]]
;
- %b = bitcast i8* %a to ptr
- ret ptr %b
+ ret ptr %a
}
-define i8* @bitcast_opaque_to_typed(ptr %a) {
+define ptr @bitcast_opaque_to_typed(ptr %a) {
; CHECK-LABEL: @bitcast_opaque_to_typed(
; CHECK-NEXT: ret ptr [[A:%.*]]
;
- %b = bitcast ptr %a to i8*
- ret i8* %b
+ ret ptr %a
}
@g = global i8 0
; CHECK-LABEL: @bitcast_typed_to_opaque_constexpr(
; CHECK-NEXT: ret ptr @g
;
- ret ptr bitcast (i8* @g to ptr)
+ ret ptr @g
}
define ptr @addrspacecast_opaque_to_opaque(ptr addrspace(1) %a) {
ret ptr %b
}
-define ptr @addrspacecast_typed_to_opaque(i8 addrspace(1)* %a) {
+define ptr @addrspacecast_typed_to_opaque(ptr addrspace(1) %a) {
; CHECK-LABEL: @addrspacecast_typed_to_opaque(
; CHECK-NEXT: [[B:%.*]] = addrspacecast ptr addrspace(1) [[A:%.*]] to ptr
; CHECK-NEXT: ret ptr [[B]]
;
- %b = addrspacecast i8 addrspace(1)* %a to ptr
+ %b = addrspacecast ptr addrspace(1) %a to ptr
ret ptr %b
}
-define i8* @addrspacecast_opaque_to_typed(ptr addrspace(1) %a) {
+define ptr @addrspacecast_opaque_to_typed(ptr addrspace(1) %a) {
; CHECK-LABEL: @addrspacecast_opaque_to_typed(
; CHECK-NEXT: [[B:%.*]] = addrspacecast ptr addrspace(1) [[A:%.*]] to ptr
; CHECK-NEXT: ret ptr [[B]]
;
- %b = addrspacecast ptr addrspace(1) %a to i8*
- ret i8* %b
+ %b = addrspacecast ptr addrspace(1) %a to ptr
+ ret ptr %b
}
define ptr addrspace(1) @bitcast_and_addrspacecast_eliminable(ptr %a) {
; CHECK-NEXT: [[C:%.*]] = addrspacecast ptr [[A:%.*]] to ptr addrspace(1)
; CHECK-NEXT: ret ptr addrspace(1) [[C]]
;
- %b = bitcast ptr %a to i8*
- %c = addrspacecast i8* %b to ptr addrspace(1)
+ %c = addrspacecast ptr %a to ptr addrspace(1)
ret ptr addrspace(1) %c
}
; CHECK-LABEL: @addrspacecast_typed_to_opaque_constexpr(
; CHECK-NEXT: ret ptr addrspace(1) addrspacecast (ptr @g to ptr addrspace(1))
;
- ret ptr addrspace(1) addrspacecast (i8* @g to ptr addrspace(1))
+ ret ptr addrspace(1) addrspacecast (ptr @g to ptr addrspace(1))
}
define ptr @gep_constexpr_1(ptr %a) {
; CHECK-LABEL: @gep_constexpr_2(
; CHECK-NEXT: ret ptr getelementptr (i8, ptr @g, i64 3)
;
- ret ptr getelementptr (i8, ptr bitcast (i8* @g to ptr), i32 3)
+ ret ptr getelementptr (i8, ptr @g, i32 3)
}
define ptr addrspace(1) @gep_constexpr_3(ptr %a) {
; CHECK-LABEL: @gep_constexpr_3(
; CHECK-NEXT: ret ptr addrspace(1) getelementptr (i8, ptr addrspace(1) addrspacecast (ptr @g to ptr addrspace(1)), i64 3)
;
- ret ptr addrspace(1) getelementptr ([0 x i8], ptr addrspace(1) addrspacecast (i8* @g to ptr addrspace(1)), i64 0, i32 3)
+ ret ptr addrspace(1) getelementptr ([0 x i8], ptr addrspace(1) addrspacecast (ptr @g to ptr addrspace(1)), i64 0, i32 3)
}
define ptr @load_bitcast_1(ptr %a) {
; CHECK-NEXT: [[B1:%.*]] = load ptr, ptr [[A:%.*]], align 8
; CHECK-NEXT: ret ptr [[B1]]
;
- %b = load i8*, ptr %a
- %c = bitcast i8* %b to ptr
- ret ptr %c
+ %b = load ptr, ptr %a
+ ret ptr %b
}
define ptr @load_bitcast_2(ptr %a) {
; CHECK-NEXT: [[C1:%.*]] = load ptr, ptr [[A:%.*]], align 8
; CHECK-NEXT: ret ptr [[C1]]
;
- %b = bitcast ptr %a to i8**
- %c = load i8*, i8** %b
- %d = bitcast i8* %c to ptr
- ret ptr %d
+ %c = load ptr, ptr %a
+ ret ptr %c
}
define void @call(ptr %a) {
}
declare void @varargs(...)
-define void @varargs_cast_typed_to_opaque_same_type(i32* %a) {
+define void @varargs_cast_typed_to_opaque_same_type(ptr %a) {
; CHECK-LABEL: @varargs_cast_typed_to_opaque_same_type(
; CHECK-NEXT: call void (...) @varargs(ptr byval(i32) [[A:%.*]])
; CHECK-NEXT: ret void
;
- %b = bitcast i32* %a to ptr
- call void (...) @varargs(ptr byval(i32) %b)
+ call void (...) @varargs(ptr byval(i32) %a)
ret void
}
-define void @varargs_cast_typed_to_opaque_different_type(i32* %a) {
+define void @varargs_cast_typed_to_opaque_different_type(ptr %a) {
; CHECK-LABEL: @varargs_cast_typed_to_opaque_different_type(
; CHECK-NEXT: call void (...) @varargs(ptr byval(float) [[A:%.*]])
; CHECK-NEXT: ret void
;
- %b = bitcast i32* %a to ptr
- call void (...) @varargs(ptr byval(float) %b)
+ call void (...) @varargs(ptr byval(float) %a)
ret void
}
-define void @varargs_cast_typed_to_opaque_different_size(i32* %a) {
+define void @varargs_cast_typed_to_opaque_different_size(ptr %a) {
; CHECK-LABEL: @varargs_cast_typed_to_opaque_different_size(
; CHECK-NEXT: call void (...) @varargs(ptr byval(i64) [[A:%.*]])
; CHECK-NEXT: ret void
;
- %b = bitcast i32* %a to ptr
- call void (...) @varargs(ptr byval(i64) %b)
+ call void (...) @varargs(ptr byval(i64) %a)
ret void
}
; CHECK-NEXT: call void (...) @varargs(ptr byval(i8) [[A:%.*]])
; CHECK-NEXT: ret void
;
- %b = bitcast ptr %a to i8*
- call void (...) @varargs(i8* byval(i8) %b)
+ call void (...) @varargs(ptr byval(i8) %a)
ret void
}
%Vs4Int8 = type <{ i8 }>
%swift.type = type { i64 }
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #8
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #8
-@_swift_slowAlloc = external global i8* (i64, i64)*
+@_swift_slowAlloc = external global ptr
-declare i8* @rt_swift_slowAlloc(i64, i64)
+declare ptr @rt_swift_slowAlloc(i64, i64)
-define %swift.opaque* @_TwTkV([24 x i8]* %dest, %swift.opaque* %src,
-%swift.type* %bios_boot_params) #0 {
+define ptr @_TwTkV(ptr %dest, ptr %src,
+ptr %bios_boot_params) #0 {
entry:
- %0 = bitcast %swift.opaque* %src to %V*
- %1 = call noalias i8* @rt_swift_slowAlloc(i64 40, i64 0) #11
- %2 = bitcast [24 x i8]* %dest to i8**
- store i8* %1, i8** %2, align 8
- %3 = bitcast i8* %1 to %V*
- %4 = bitcast %V* %3 to i8*
- %5 = bitcast %V* %0 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* %5, i64 40, i1 false)
- %6 = bitcast %V* %3 to %swift.opaque*
- ret %swift.opaque* %6
+ %0 = call noalias ptr @rt_swift_slowAlloc(i64 40, i64 0) #11
+ store ptr %0, ptr %dest, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr %0, ptr %src, i64 40, i1 false)
+ ret ptr %0
}
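; With opaque pointers every bitcast in this function was a no-op pointee
; retype: %src -> %V*, the allocation result -> %V* -> i8*, and %dest ->
; i8** all collapse, so the allocation feeds the store, the memcpy and the
; return directly, as the rewritten body above shows.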
ret i37 %r
}
-define i32 @shl_mask_extra_use(i32 %x, i32* %p) {
+define i32 @shl_mask_extra_use(i32 %x, ptr %p) {
; CHECK-LABEL: @shl_mask_extra_use(
; CHECK-NEXT: [[Z:%.*]] = and i32 [[X:%.*]], 255
; CHECK-NEXT: [[S:%.*]] = shl nuw nsw i32 [[Z]], 8
-; CHECK-NEXT: store i32 [[S]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[S]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = or i32 [[Z]], [[S]]
; CHECK-NEXT: ret i32 [[R]]
;
%z = and i32 %x, 255
%s = shl i32 %z, 8
- store i32 %s, i32* %p
+ store i32 %s, ptr %p
%r = or i32 %z, %s
ret i32 %r
}
}
-define i8 @test5_extra_use_not(i8 %x, i8 %y, i8* %dst) {
+define i8 @test5_extra_use_not(i8 %x, i8 %y, ptr %dst) {
; CHECK-LABEL: @test5_extra_use_not(
; CHECK-NEXT: [[NOTX:%.*]] = xor i8 [[X:%.*]], -1
-; CHECK-NEXT: store i8 [[NOTX]], i8* [[DST:%.*]], align 1
+; CHECK-NEXT: store i8 [[NOTX]], ptr [[DST:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X]], [[Y:%.*]]
; CHECK-NEXT: [[Z:%.*]] = xor i8 [[TMP1]], -1
; CHECK-NEXT: ret i8 [[Z]]
;
%xor = xor i8 %x, %y
%notx = xor i8 %x, -1
- store i8 %notx, i8* %dst
+ store i8 %notx, ptr %dst
%z = or i8 %notx, %xor
ret i8 %z
}
-define i65 @test5_extra_use_xor(i65 %x, i65 %y, i65* %dst) {
+define i65 @test5_extra_use_xor(i65 %x, i65 %y, ptr %dst) {
; CHECK-LABEL: @test5_extra_use_xor(
; CHECK-NEXT: [[XOR:%.*]] = xor i65 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: store i65 [[XOR]], i65* [[DST:%.*]], align 4
+; CHECK-NEXT: store i65 [[XOR]], ptr [[DST:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = and i65 [[X]], [[Y]]
; CHECK-NEXT: [[Z:%.*]] = xor i65 [[TMP1]], -1
; CHECK-NEXT: ret i65 [[Z]]
;
%xor = xor i65 %x, %y
- store i65 %xor, i65* %dst
+ store i65 %xor, ptr %dst
%notx = xor i65 %x, -1
%z = or i65 %notx, %xor
ret i65 %z
}
-define i16 @test5_extra_use_not_xor(i16 %x, i16 %y, i16* %dst_not, i16* %dst_xor) {
+define i16 @test5_extra_use_not_xor(i16 %x, i16 %y, ptr %dst_not, ptr %dst_xor) {
; CHECK-LABEL: @test5_extra_use_not_xor(
; CHECK-NEXT: [[XOR:%.*]] = xor i16 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: store i16 [[XOR]], i16* [[DST_XOR:%.*]], align 2
+; CHECK-NEXT: store i16 [[XOR]], ptr [[DST_XOR:%.*]], align 2
; CHECK-NEXT: [[NOTX:%.*]] = xor i16 [[X]], -1
-; CHECK-NEXT: store i16 [[NOTX]], i16* [[DST_NOT:%.*]], align 2
+; CHECK-NEXT: store i16 [[NOTX]], ptr [[DST_NOT:%.*]], align 2
; CHECK-NEXT: [[Z:%.*]] = or i16 [[XOR]], [[NOTX]]
; CHECK-NEXT: ret i16 [[Z]]
;
%xor = xor i16 %x, %y
- store i16 %xor, i16* %dst_xor
+ store i16 %xor, ptr %dst_xor
%notx = xor i16 %x, -1
- store i16 %notx, i16* %dst_not
+ store i16 %notx, ptr %dst_not
%z = or i16 %notx, %xor
ret i16 %z
}
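; All three tests above rest on the identity ~x | (x ^ y) == ~(x & y):
;   ~x | (x ^ y) = ~x | (x & ~y) = (~x | x) & (~x | ~y) = ~(x & y)
; (a worked derivation, not extra test input). One extra use of the not or
; of the xor still permits the fold; extra uses of both, as in
; @test5_extra_use_not_xor, keep the original or instead.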
ret i32 %or
}
-define i32 @test10_extrause(i32 %A, i32 %B, i32* %dst) {
+define i32 @test10_extrause(i32 %A, i32 %B, ptr %dst) {
; CHECK-LABEL: @test10_extrause(
; CHECK-NEXT: [[NOT:%.*]] = xor i32 [[A:%.*]], -1
-; CHECK-NEXT: store i32 [[NOT]], i32* [[DST:%.*]], align 4
+; CHECK-NEXT: store i32 [[NOT]], ptr [[DST:%.*]], align 4
; CHECK-NEXT: ret i32 -1
;
%xor1 = xor i32 %B, %A
%not = xor i32 %A, -1
- store i32 %not, i32* %dst
+ store i32 %not, ptr %dst
%xor2 = xor i32 %not, %B
%or = or i32 %xor1, %xor2
ret i32 %or
}
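; Worked reasoning for the constant -1 result (here and in the commuted
; twin below): with %not = ~A,
;   %xor2 = ~A ^ B = ~(A ^ B) = ~%xor1
; and x | ~x is all-ones, so the or folds to -1; only the stored ~A
; survives.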
-define i32 @test10_commuted_extrause(i32 %A, i32 %B, i32* %dst) {
+define i32 @test10_commuted_extrause(i32 %A, i32 %B, ptr %dst) {
; CHECK-LABEL: @test10_commuted_extrause(
; CHECK-NEXT: [[NOT:%.*]] = xor i32 [[A:%.*]], -1
-; CHECK-NEXT: store i32 [[NOT]], i32* [[DST:%.*]], align 4
+; CHECK-NEXT: store i32 [[NOT]], ptr [[DST:%.*]], align 4
; CHECK-NEXT: ret i32 -1
;
%xor1 = xor i32 %B, %A
%not = xor i32 %A, -1
- store i32 %not, i32* %dst
+ store i32 %not, ptr %dst
%xor2 = xor i32 %not, %B
%or = or i32 %xor2, %xor1
ret i32 %or
ret i1 %D
}
-define i1 @test27(i32* %A, i32* %B) {
+define i1 @test27(ptr %A, ptr %B) {
; CHECK-LABEL: @test27(
-; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32* [[A:%.*]], null
-; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32* [[B:%.*]], null
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq ptr [[A:%.*]], null
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq ptr [[B:%.*]], null
; CHECK-NEXT: [[E:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret i1 [[E]]
;
- %C1 = ptrtoint i32* %A to i32
- %C2 = ptrtoint i32* %B to i32
+ %C1 = ptrtoint ptr %A to i32
+ %C2 = ptrtoint ptr %B to i32
%D = or i32 %C1, %C2
%E = icmp eq i32 %D, 0
ret i1 %E
}
-define <2 x i1> @test27vec(<2 x i32*> %A, <2 x i32*> %B) {
+define <2 x i1> @test27vec(<2 x ptr> %A, <2 x ptr> %B) {
; CHECK-LABEL: @test27vec(
-; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i32*> [[A:%.*]], zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <2 x i32*> [[B:%.*]], zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x ptr> [[A:%.*]], zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <2 x ptr> [[B:%.*]], zeroinitializer
; CHECK-NEXT: [[E:%.*]] = and <2 x i1> [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret <2 x i1> [[E]]
;
- %C1 = ptrtoint <2 x i32*> %A to <2 x i32>
- %C2 = ptrtoint <2 x i32*> %B to <2 x i32>
+ %C1 = ptrtoint <2 x ptr> %A to <2 x i32>
+ %C2 = ptrtoint <2 x ptr> %B to <2 x i32>
%D = or <2 x i32> %C1, %C2
%E = icmp eq <2 x i32> %D, zeroinitializer
ret <2 x i1> %E
ret i1 %D
}
-define i1 @test29(i32* %A, i32* %B) {
+define i1 @test29(ptr %A, ptr %B) {
; CHECK-LABEL: @test29(
-; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i32* [[A:%.*]], null
-; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i32* [[B:%.*]], null
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ne ptr [[A:%.*]], null
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ne ptr [[B:%.*]], null
; CHECK-NEXT: [[E:%.*]] = or i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret i1 [[E]]
;
- %C1 = ptrtoint i32* %A to i32
- %C2 = ptrtoint i32* %B to i32
+ %C1 = ptrtoint ptr %A to i32
+ %C2 = ptrtoint ptr %B to i32
%D = or i32 %C1, %C2
%E = icmp ne i32 %D, 0
ret i1 %E
}
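; @test27/@test29 and their vector variants rest on the same bit trick:
; (p1 | p2) == 0 iff both p1 and p2 are zero, so or-of-ptrtoint compared
; against zero becomes per-pointer null tests joined with and (for eq) or
; or (for ne).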
-define <2 x i1> @test29vec(<2 x i32*> %A, <2 x i32*> %B) {
+define <2 x i1> @test29vec(<2 x ptr> %A, <2 x ptr> %B) {
; CHECK-LABEL: @test29vec(
-; CHECK-NEXT: [[TMP1:%.*]] = icmp ne <2 x i32*> [[A:%.*]], zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <2 x i32*> [[B:%.*]], zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ne <2 x ptr> [[A:%.*]], zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <2 x ptr> [[B:%.*]], zeroinitializer
; CHECK-NEXT: [[E:%.*]] = or <2 x i1> [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret <2 x i1> [[E]]
;
- %C1 = ptrtoint <2 x i32*> %A to <2 x i32>
- %C2 = ptrtoint <2 x i32*> %B to <2 x i32>
+ %C1 = ptrtoint <2 x ptr> %A to <2 x i32>
+ %C2 = ptrtoint <2 x ptr> %B to <2 x i32>
%D = or <2 x i32> %C1, %C2
%E = icmp ne <2 x i32> %D, zeroinitializer
ret <2 x i1> %E
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
target triple = "i386-apple-macosx10.7.2"
-%struct.__sFILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
-%struct.__sbuf = type { i8*, i32 }
+%struct.__sFILE = type { ptr, i32, i32, i16, i16, %struct.__sbuf, i32, ptr, ptr, ptr, ptr, ptr, %struct.__sbuf, ptr, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
+%struct.__sbuf = type { ptr, i32 }
%struct.__sFILEX = type opaque
@.str = private unnamed_addr constant [13 x i8] c"Hello world\0A\00", align 1
@.str2 = private unnamed_addr constant [3 x i8] c"%s\00", align 1
-define void @test1(%struct.__sFILE* %stream) nounwind {
+define void @test1(ptr %stream) nounwind {
; CHECK-LABEL: define void @test1(
; CHECK: call i32 @"fwrite$UNIX2003"
- %call = tail call i32 (%struct.__sFILE*, i8*, ...) @fprintf(%struct.__sFILE* %stream, i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str, i32 0, i32 0)) nounwind
+ %call = tail call i32 (ptr, ptr, ...) @fprintf(ptr %stream, ptr @.str) nounwind
ret void
}
-define void @test2(%struct.__sFILE* %stream, i8* %str) nounwind ssp {
+define void @test2(ptr %stream, ptr %str) nounwind ssp {
; CHECK-LABEL: define void @test2(
; CHECK: call i32 @"fputs$UNIX2003"
- %call = tail call i32 (%struct.__sFILE*, i8*, ...) @fprintf(%struct.__sFILE* %stream, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str2, i32 0, i32 0), i8* %str) nounwind
+ %call = tail call i32 (ptr, ptr, ...) @fprintf(ptr %stream, ptr @.str2, ptr %str) nounwind
ret void
}
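; Libcall notes for the two folds above: fprintf(F, "Hello world\0A") has no
; format specifiers, so it is simplified to an fwrite of the string data,
; and fprintf(F, "%s", str) becomes fputs; on this i386-apple-macosx triple
; the calls take the "$UNIX2003" suffix the CHECK lines look for.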
-declare i32 @fprintf(%struct.__sFILE*, i8*, ...) nounwind
+declare i32 @fprintf(ptr, ptr, ...) nounwind
@pr21445_data = external global i32
define i1 @pr21445(i8 %a) {
; CHECK-LABEL: @pr21445(
-; CHECK-NEXT: %[[umul:.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %a, i8 ptrtoint (i32* @pr21445_data to i8))
+; CHECK-NEXT: %[[umul:.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %a, i8 ptrtoint (ptr @pr21445_data to i8))
; CHECK-NEXT: %[[cmp:.*]] = extractvalue { i8, i1 } %[[umul]], 1
; CHECK-NEXT: ret i1 %[[cmp]]
%ext = zext i8 %a to i32
- %mul = mul i32 %ext, zext (i8 ptrtoint (i32* @pr21445_data to i8) to i32)
+ %mul = mul i32 %ext, zext (i8 ptrtoint (ptr @pr21445_data to i8) to i32)
%and = and i32 %mul, 255
%cmp = icmp ne i32 %mul, %and
ret i1 %cmp
; This fold is not performed here because 1) it is unprofitable to
; promote it since the add.off instruction has another use, and 2) it is unsafe
; because the add-with-off makes the high bits of the original add live.
-define i32 @test2(i32 %a, i32 %b, i64* %P) nounwind ssp {
+define i32 @test2(i32 %a, i32 %b, ptr %P) nounwind ssp {
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[A:%.*]] to i64
; CHECK-NEXT: [[CONV2:%.*]] = sext i32 [[B:%.*]] to i64
; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[CONV2]], [[CONV]]
; CHECK-NEXT: [[ADD_OFF:%.*]] = add nsw i64 [[ADD]], 2147483648
-; CHECK-NEXT: store i64 [[ADD_OFF]], i64* [[P:%.*]], align 4
+; CHECK-NEXT: store i64 [[ADD_OFF]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = icmp ugt i64 [[ADD_OFF]], 4294967295
; CHECK-NEXT: br i1 [[TMP0]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; CHECK: if.then:
%conv2 = sext i32 %b to i64
%add = add nsw i64 %conv2, %conv
%add.off = add i64 %add, 2147483648
- store i64 %add.off, i64* %P
+ store i64 %add.off, ptr %P
%0 = icmp ugt i64 %add.off, 4294967295
br i1 %0, label %if.then, label %if.end
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
; Most basic case, fully identical PHI nodes
-define void @test0(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
+define void @test0(i32 %v0, i32 %v1, i1 %c, ptr %d0, ptr %d1) {
; CHECK-LABEL: @test0(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
; CHECK-NEXT: br label [[END]]
; CHECK: end:
; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
-; CHECK-NEXT: store i32 [[I1]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT: store i32 [[I1]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4
; CHECK-NEXT: ret void
;
entry:
end:
%i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
%i1 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
- store i32 %i0, i32* %d0
- store i32 %i1, i32* %d1
+ store i32 %i0, ptr %d0
+ store i32 %i1, ptr %d1
ret void
}
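; A minimal sketch of the PHI CSE shown in this file: two PHIs whose
; (incoming value, predecessor) pairs match as sets are merged, and uses of
; the dropped PHI are redirected to the survivor, which is why only one phi
; remains in the CHECK lines.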
; Fully identical PHI nodes, but order of operands differs
-define void @test1(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
+define void @test1(i32 %v0, i32 %v1, i1 %c, ptr %d0, ptr %d1) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
; CHECK-NEXT: br label [[END]]
; CHECK: end:
; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
-; CHECK-NEXT: store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT: store i32 [[I0]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT: store i32 [[I0]], ptr [[D1:%.*]], align 4
; CHECK-NEXT: ret void
;
entry:
end:
%i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
%i1 = phi i32 [ %v1, %b1 ], [ %v0, %b0 ]
- store i32 %i0, i32* %d0
- store i32 %i1, i32* %d1
+ store i32 %i0, ptr %d0
+ store i32 %i1, ptr %d1
ret void
}
; Different incoming values in second PHI
-define void @negative_test2(i32 %v0, i32 %v1, i32 %v2, i1 %c, i32* %d0, i32* %d1) {
+define void @negative_test2(i32 %v0, i32 %v1, i32 %v2, i1 %c, ptr %d0, ptr %d1) {
; CHECK-LABEL: @negative_test2(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
; CHECK: end:
; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V0]], [[B0]] ], [ [[V2:%.*]], [[B1]] ]
-; CHECK-NEXT: store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4
; CHECK-NEXT: ret void
;
entry:
end:
%i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
  %i1 = phi i32 [ %v0, %b0 ], [ %v2, %b1 ] ; from %b1 takes %v2 instead of %v1
- store i32 %i0, i32* %d0
- store i32 %i1, i32* %d1
+ store i32 %i0, ptr %d0
+ store i32 %i1, ptr %d1
ret void
}
-define void @negative_test3(i32 %v0, i32 %v1, i32 %v2, i1 %c, i32* %d0, i32* %d1) {
+define void @negative_test3(i32 %v0, i32 %v1, i32 %v2, i1 %c, ptr %d0, ptr %d1) {
; CHECK-LABEL: @negative_test3(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
; CHECK: end:
; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V0]], [[B0]] ], [ [[V2:%.*]], [[B1]] ]
-; CHECK-NEXT: store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4
; CHECK-NEXT: ret void
;
entry:
end:
%i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
  %i1 = phi i32 [ %v2, %b1 ], [ %v0, %b0 ] ; from %b1 takes %v2 instead of %v1
- store i32 %i0, i32* %d0
- store i32 %i1, i32* %d1
+ store i32 %i0, ptr %d0
+ store i32 %i1, ptr %d1
ret void
}
-define void @negative_test4(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
+define void @negative_test4(i32 %v0, i32 %v1, i1 %c, ptr %d0, ptr %d1) {
; CHECK-LABEL: @negative_test4(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
; CHECK-NEXT: br label [[END]]
; CHECK: end:
; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
-; CHECK-NEXT: store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT: store i32 [[I0]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT: store i32 [[I0]], ptr [[D1:%.*]], align 4
; CHECK-NEXT: ret void
;
entry:
end:
%i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
%i1 = phi i32 [ %v1, %b1 ], [ %v0, %b0 ] ; incoming values are swapped
- store i32 %i0, i32* %d0
- store i32 %i1, i32* %d1
+ store i32 %i0, ptr %d0
+ store i32 %i1, ptr %d1
ret void
}
; Both PHIs are identical, but the first one has no uses, so ignore it.
-define void @test5(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
+define void @test5(i32 %v0, i32 %v1, i1 %c, ptr %d0, ptr %d1) {
; CHECK-LABEL: @test5(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
; CHECK-NEXT: br label [[END]]
; CHECK: end:
; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
-; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4
; CHECK-NEXT: ret void
;
entry:
end:
%i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ] ; unused
%i1 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
- store i32 %i1, i32* %d1
+ store i32 %i1, ptr %d1
ret void
}
; Second PHI has no uses
-define void @test6(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
+define void @test6(i32 %v0, i32 %v1, i1 %c, ptr %d0, ptr %d1) {
; CHECK-LABEL: @test6(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
; CHECK-NEXT: br label [[END]]
; CHECK: end:
; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
-; CHECK-NEXT: store i32 [[I0]], i32* [[D0:%.*]], align 4
+; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4
; CHECK-NEXT: ret void
;
entry:
end:
%i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
%i1 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ] ; unused
- store i32 %i0, i32* %d0
+ store i32 %i0, ptr %d0
ret void
}
; Non-matching PHI node should be ignored without terminating CSE.
-define void @test7(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, i32* %d0, i32* %d1, i16* %d2) {
+define void @test7(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, ptr %d0, ptr %d1, ptr %d2) {
; CHECK-LABEL: @test7(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
; CHECK: end:
; CHECK-NEXT: [[IBAD:%.*]] = phi i16 [ [[V2:%.*]], [[B0]] ], [ [[V3:%.*]], [[B1]] ]
; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
-; CHECK-NEXT: store i32 [[I1]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4
-; CHECK-NEXT: store i16 [[IBAD]], i16* [[D2:%.*]], align 2
+; CHECK-NEXT: store i32 [[I1]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4
+; CHECK-NEXT: store i16 [[IBAD]], ptr [[D2:%.*]], align 2
; CHECK-NEXT: ret void
;
entry:
%iBAD = phi i16 [ %v2, %b0 ], [ %v3, %b1 ]
%i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
%i1 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
- store i32 %i0, i32* %d0
- store i32 %i1, i32* %d1
- store i16 %iBAD, i16* %d2
+ store i32 %i0, ptr %d0
+ store i32 %i1, ptr %d1
+ store i16 %iBAD, ptr %d2
ret void
}
-define void @test8(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, i32* %d0, i32* %d1, i16* %d2) {
+define void @test8(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, ptr %d0, ptr %d1, ptr %d2) {
; CHECK-LABEL: @test8(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
; CHECK: end:
; CHECK-NEXT: [[IBAD:%.*]] = phi i16 [ [[V2:%.*]], [[B0]] ], [ [[V3:%.*]], [[B1]] ]
; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
-; CHECK-NEXT: store i32 [[I1]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4
-; CHECK-NEXT: store i16 [[IBAD]], i16* [[D2:%.*]], align 2
+; CHECK-NEXT: store i32 [[I1]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4
+; CHECK-NEXT: store i16 [[IBAD]], ptr [[D2:%.*]], align 2
; CHECK-NEXT: ret void
;
entry:
%i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
%iBAD = phi i16 [ %v2, %b0 ], [ %v3, %b1 ]
%i1 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
- store i32 %i0, i32* %d0
- store i32 %i1, i32* %d1
- store i16 %iBAD, i16* %d2
+ store i32 %i0, ptr %d0
+ store i32 %i1, ptr %d1
+ store i16 %iBAD, ptr %d2
ret void
}
-define void @test9(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, i32* %d0, i32* %d1, i16* %d2) {
+define void @test9(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, ptr %d0, ptr %d1, ptr %d2) {
; CHECK-LABEL: @test9(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
; CHECK: end:
; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
; CHECK-NEXT: [[IBAD:%.*]] = phi i16 [ [[V2:%.*]], [[B0]] ], [ [[V3:%.*]], [[B1]] ]
-; CHECK-NEXT: store i32 [[I1]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4
-; CHECK-NEXT: store i16 [[IBAD]], i16* [[D2:%.*]], align 2
+; CHECK-NEXT: store i32 [[I1]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4
+; CHECK-NEXT: store i16 [[IBAD]], ptr [[D2:%.*]], align 2
; CHECK-NEXT: ret void
;
entry:
%i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
%i1 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
%iBAD = phi i16 [ %v2, %b0 ], [ %v3, %b1 ]
- store i32 %i0, i32* %d0
- store i32 %i1, i32* %d1
- store i16 %iBAD, i16* %d2
+ store i32 %i0, ptr %d0
+ store i32 %i1, ptr %d1
+ store i16 %iBAD, ptr %d2
ret void
}
; Make sure GVN won't undo the transformation:
; RUN: opt -passes=instcombine,gvn -S < %s | FileCheck %s --check-prefixes=ALL,INSTCOMBINEGVN
-declare i8* @get_ptr.i8()
-declare i32* @get_ptr.i32()
-declare void @foo.i8(i8*)
-declare void @foo.i32(i32*)
+declare ptr @get_ptr.i8()
+declare ptr @get_ptr.i32()
+declare void @foo.i8(ptr)
+declare void @foo.i32(ptr)
define i32 @test_gep_and_bitcast(i1 %cond, i1 %cond2) {
; ALL-LABEL: @test_gep_and_bitcast(
; ALL-NEXT: entry:
-; ALL-NEXT: [[OBJ:%.*]] = call i8* @get_ptr.i8()
+; ALL-NEXT: [[OBJ:%.*]] = call ptr @get_ptr.i8()
; ALL-NEXT: br i1 [[COND:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; ALL: bb1:
; ALL-NEXT: br label [[EXIT:%.*]]
; ALL: bb2:
; ALL-NEXT: br label [[EXIT]]
; ALL: exit:
-; ALL-NEXT: [[PTR_TYPED_IN:%.*]] = getelementptr inbounds i8, i8* [[OBJ]], i64 16
-; ALL-NEXT: [[PTR_TYPED:%.*]] = bitcast i8* [[PTR_TYPED_IN]] to i32*
-; ALL-NEXT: [[RES_PHI:%.*]] = load i32, i32* [[PTR_TYPED]], align 4
-; ALL-NEXT: store i32 1, i32* [[PTR_TYPED]], align 4
+; ALL-NEXT: [[PTR_TYPED_IN:%.*]] = getelementptr inbounds i8, ptr [[OBJ]], i64 16
+; ALL-NEXT: [[RES_PHI:%.*]] = load i32, ptr [[PTR_TYPED_IN]], align 4
+; ALL-NEXT: store i32 1, ptr [[PTR_TYPED_IN]], align 4
; ALL-NEXT: [[RES:%.*]] = select i1 [[COND2:%.*]], i32 [[RES_PHI]], i32 1
; ALL-NEXT: ret i32 [[RES]]
;
entry:
- %obj = call i8* @get_ptr.i8()
+ %obj = call ptr @get_ptr.i8()
br i1 %cond, label %bb1, label %bb2
bb1:
- %ptr1 = getelementptr inbounds i8, i8* %obj, i64 16
- %ptr1.typed = bitcast i8* %ptr1 to i32*
- %res1 = load i32, i32* %ptr1.typed
+ %ptr1 = getelementptr inbounds i8, ptr %obj, i64 16
+ %res1 = load i32, ptr %ptr1
br label %exit
bb2:
- %ptr2 = getelementptr inbounds i8, i8* %obj, i64 16
- %ptr2.typed = bitcast i8* %ptr2 to i32*
- %res2 = load i32, i32* %ptr2.typed
+ %ptr2 = getelementptr inbounds i8, ptr %obj, i64 16
+ %res2 = load i32, ptr %ptr2
br label %exit
exit:
- %ptr.typed = phi i32* [ %ptr1.typed, %bb1 ], [ %ptr2.typed, %bb2 ]
+ %ptr.typed = phi ptr [ %ptr1, %bb1 ], [ %ptr2, %bb2 ]
%res.phi = phi i32 [ %res1, %bb1 ], [ %res2, %bb2 ]
- store i32 1, i32* %ptr.typed
- %res.load = load i32, i32* %ptr.typed
+ store i32 1, ptr %ptr.typed
+ %res.load = load i32, ptr %ptr.typed
%res = select i1 %cond2, i32 %res.phi, i32 %res.load
ret i32 %res
}
-define i32 @test_gep_and_bitcast_arg(i8* %obj, i1 %cond, i1 %cond2) {
+define i32 @test_gep_and_bitcast_arg(ptr %obj, i1 %cond, i1 %cond2) {
; ALL-LABEL: @test_gep_and_bitcast_arg(
; ALL-NEXT: entry:
; ALL-NEXT: br i1 [[COND:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; ALL: bb2:
; ALL-NEXT: br label [[EXIT]]
; ALL: exit:
-; ALL-NEXT: [[PTR_TYPED_IN:%.*]] = getelementptr inbounds i8, i8* [[OBJ:%.*]], i64 16
-; ALL-NEXT: [[PTR_TYPED:%.*]] = bitcast i8* [[PTR_TYPED_IN]] to i32*
-; ALL-NEXT: [[RES_PHI:%.*]] = load i32, i32* [[PTR_TYPED]], align 4
-; ALL-NEXT: store i32 1, i32* [[PTR_TYPED]], align 4
+; ALL-NEXT: [[PTR_TYPED_IN:%.*]] = getelementptr inbounds i8, ptr [[OBJ:%.*]], i64 16
+; ALL-NEXT: [[RES_PHI:%.*]] = load i32, ptr [[PTR_TYPED_IN]], align 4
+; ALL-NEXT: store i32 1, ptr [[PTR_TYPED_IN]], align 4
; ALL-NEXT: [[RES:%.*]] = select i1 [[COND2:%.*]], i32 [[RES_PHI]], i32 1
; ALL-NEXT: ret i32 [[RES]]
;
br i1 %cond, label %bb1, label %bb2
bb1:
- %ptr1 = getelementptr inbounds i8, i8* %obj, i64 16
- %ptr1.typed = bitcast i8* %ptr1 to i32*
- %res1 = load i32, i32* %ptr1.typed
+ %ptr1 = getelementptr inbounds i8, ptr %obj, i64 16
+ %res1 = load i32, ptr %ptr1
br label %exit
bb2:
- %ptr2 = getelementptr inbounds i8, i8* %obj, i64 16
- %ptr2.typed = bitcast i8* %ptr2 to i32*
- %res2 = load i32, i32* %ptr2.typed
+ %ptr2 = getelementptr inbounds i8, ptr %obj, i64 16
+ %res2 = load i32, ptr %ptr2
br label %exit
exit:
- %ptr.typed = phi i32* [ %ptr1.typed, %bb1 ], [ %ptr2.typed, %bb2 ]
+ %ptr.typed = phi ptr [ %ptr1, %bb1 ], [ %ptr2, %bb2 ]
%res.phi = phi i32 [ %res1, %bb1 ], [ %res2, %bb2 ]
- store i32 1, i32* %ptr.typed
- %res.load = load i32, i32* %ptr.typed
+ store i32 1, ptr %ptr.typed
+ %res.load = load i32, ptr %ptr.typed
%res = select i1 %cond2, i32 %res.phi, i32 %res.load
ret i32 %res
}
; ALL-NEXT: entry:
; ALL-NEXT: br i1 [[COND:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; ALL: bb1:
-; ALL-NEXT: [[OBJ1:%.*]] = call i8* @get_ptr.i8()
+; ALL-NEXT: [[OBJ1:%.*]] = call ptr @get_ptr.i8()
; ALL-NEXT: br label [[MERGE:%.*]]
; ALL: bb2:
-; ALL-NEXT: [[OBJ2_TYPED:%.*]] = call i32* @get_ptr.i32()
-; ALL-NEXT: [[OBJ2:%.*]] = bitcast i32* [[OBJ2_TYPED]] to i8*
+; ALL-NEXT: [[OBJ2_TYPED:%.*]] = call ptr @get_ptr.i32()
; ALL-NEXT: br label [[MERGE]]
; ALL: merge:
-; ALL-NEXT: [[OBJ:%.*]] = phi i8* [ [[OBJ1]], [[BB1]] ], [ [[OBJ2]], [[BB2]] ]
-; ALL-NEXT: [[ANOTHER_PHI:%.*]] = phi i8* [ [[OBJ1]], [[BB1]] ], [ null, [[BB2]] ]
-; ALL-NEXT: call void @foo.i8(i8* [[ANOTHER_PHI]])
+; ALL-NEXT: [[OBJ:%.*]] = phi ptr [ [[OBJ1]], [[BB1]] ], [ [[OBJ2_TYPED]], [[BB2]] ]
+; ALL-NEXT: [[ANOTHER_PHI:%.*]] = phi ptr [ [[OBJ1]], [[BB1]] ], [ null, [[BB2]] ]
+; ALL-NEXT: call void @foo.i8(ptr [[ANOTHER_PHI]])
; ALL-NEXT: br i1 [[COND2:%.*]], label [[BB3:%.*]], label [[BB4:%.*]]
; ALL: bb3:
; ALL-NEXT: br label [[EXIT:%.*]]
; ALL: bb4:
; ALL-NEXT: br label [[EXIT]]
; ALL: exit:
-; ALL-NEXT: [[PTR_TYPED_IN:%.*]] = getelementptr inbounds i8, i8* [[OBJ]], i64 16
-; ALL-NEXT: [[PTR_TYPED:%.*]] = bitcast i8* [[PTR_TYPED_IN]] to i32*
-; ALL-NEXT: [[RES_PHI:%.*]] = load i32, i32* [[PTR_TYPED]], align 4
-; ALL-NEXT: store i32 1, i32* [[PTR_TYPED]], align 4
+; ALL-NEXT: [[PTR_TYPED_IN:%.*]] = getelementptr inbounds i8, ptr [[OBJ]], i64 16
+; ALL-NEXT: [[RES_PHI:%.*]] = load i32, ptr [[PTR_TYPED_IN]], align 4
+; ALL-NEXT: store i32 1, ptr [[PTR_TYPED_IN]], align 4
; ALL-NEXT: [[RES:%.*]] = select i1 [[COND3:%.*]], i32 [[RES_PHI]], i32 1
; ALL-NEXT: ret i32 [[RES]]
;
br i1 %cond, label %bb1, label %bb2
bb1:
- %obj1 = call i8* @get_ptr.i8()
+ %obj1 = call ptr @get_ptr.i8()
br label %merge
bb2:
- %obj2.typed = call i32* @get_ptr.i32()
- %obj2 = bitcast i32* %obj2.typed to i8*
+ %obj2.typed = call ptr @get_ptr.i32()
br label %merge
merge:
- %obj = phi i8* [ %obj1, %bb1 ], [ %obj2, %bb2 ]
- %another_phi = phi i8* [ %obj1, %bb1 ], [ null, %bb2 ]
- call void @foo.i8(i8* %another_phi)
+ %obj = phi ptr [ %obj1, %bb1 ], [ %obj2.typed, %bb2 ]
+ %another_phi = phi ptr [ %obj1, %bb1 ], [ null, %bb2 ]
+ call void @foo.i8(ptr %another_phi)
br i1 %cond2, label %bb3, label %bb4
bb3:
- %ptr1 = getelementptr inbounds i8, i8* %obj, i64 16
- %ptr1.typed = bitcast i8* %ptr1 to i32*
- %res1 = load i32, i32* %ptr1.typed
+ %ptr1 = getelementptr inbounds i8, ptr %obj, i64 16
+ %res1 = load i32, ptr %ptr1
br label %exit
bb4:
- %ptr2 = getelementptr inbounds i8, i8* %obj, i64 16
- %ptr2.typed = bitcast i8* %ptr2 to i32*
- %res2 = load i32, i32* %ptr2.typed
+ %ptr2 = getelementptr inbounds i8, ptr %obj, i64 16
+ %res2 = load i32, ptr %ptr2
br label %exit
exit:
- %ptr.typed = phi i32* [ %ptr1.typed, %bb3 ], [ %ptr2.typed, %bb4 ]
+ %ptr.typed = phi ptr [ %ptr1, %bb3 ], [ %ptr2, %bb4 ]
%res.phi = phi i32 [ %res1, %bb3 ], [ %res2, %bb4 ]
- store i32 1, i32* %ptr.typed
- %res.load = load i32, i32* %ptr.typed
+ store i32 1, ptr %ptr.typed
+ %res.load = load i32, ptr %ptr.typed
%res = select i1 %cond3, i32 %res.phi, i32 %res.load
ret i32 %res
}
define i32 @test_gep_i32ptr(i1 %cond, i1 %cond2) {
; ALL-LABEL: @test_gep_i32ptr(
; ALL-NEXT: entry:
-; ALL-NEXT: [[OBJ:%.*]] = call i32* @get_ptr.i32()
+; ALL-NEXT: [[OBJ:%.*]] = call ptr @get_ptr.i32()
; ALL-NEXT: br i1 [[COND:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; ALL: bb1:
; ALL-NEXT: br label [[EXIT:%.*]]
; ALL: bb2:
; ALL-NEXT: br label [[EXIT]]
; ALL: exit:
-; ALL-NEXT: [[PTR_TYPED:%.*]] = getelementptr inbounds i32, i32* [[OBJ]], i64 16
-; ALL-NEXT: [[RES_PHI:%.*]] = load i32, i32* [[PTR_TYPED]], align 4
-; ALL-NEXT: store i32 1, i32* [[PTR_TYPED]], align 4
+; ALL-NEXT: [[PTR_TYPED:%.*]] = getelementptr inbounds i32, ptr [[OBJ]], i64 16
+; ALL-NEXT: [[RES_PHI:%.*]] = load i32, ptr [[PTR_TYPED]], align 4
+; ALL-NEXT: store i32 1, ptr [[PTR_TYPED]], align 4
; ALL-NEXT: [[RES:%.*]] = select i1 [[COND2:%.*]], i32 [[RES_PHI]], i32 1
; ALL-NEXT: ret i32 [[RES]]
;
entry:
- %obj = call i32* @get_ptr.i32()
+ %obj = call ptr @get_ptr.i32()
br i1 %cond, label %bb1, label %bb2
bb1:
- %ptr1.typed = getelementptr inbounds i32, i32* %obj, i64 16
- %res1 = load i32, i32* %ptr1.typed
+ %ptr1.typed = getelementptr inbounds i32, ptr %obj, i64 16
+ %res1 = load i32, ptr %ptr1.typed
br label %exit
bb2:
- %ptr2.typed = getelementptr inbounds i32, i32* %obj, i64 16
- %res2 = load i32, i32* %ptr2.typed
+ %ptr2.typed = getelementptr inbounds i32, ptr %obj, i64 16
+ %res2 = load i32, ptr %ptr2.typed
br label %exit
exit:
- %ptr.typed = phi i32* [ %ptr1.typed, %bb1 ], [ %ptr2.typed, %bb2 ]
+ %ptr.typed = phi ptr [ %ptr1.typed, %bb1 ], [ %ptr2.typed, %bb2 ]
%res.phi = phi i32 [ %res1, %bb1 ], [ %res2, %bb2 ]
- store i32 1, i32* %ptr.typed
- %res.load = load i32, i32* %ptr.typed
+ store i32 1, ptr %ptr.typed
+ %res.load = load i32, ptr %ptr.typed
%res = select i1 %cond2, i32 %res.phi, i32 %res.load
ret i32 %res
}
define i32 @test_gep_and_bitcast_gep_base_ptr(i1 %cond, i1 %cond2) {
; ALL-LABEL: @test_gep_and_bitcast_gep_base_ptr(
; ALL-NEXT: entry:
-; ALL-NEXT: [[OBJ0:%.*]] = call i8* @get_ptr.i8()
+; ALL-NEXT: [[OBJ0:%.*]] = call ptr @get_ptr.i8()
; ALL-NEXT: br i1 [[COND:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; ALL: bb1:
; ALL-NEXT: br label [[EXIT:%.*]]
; ALL: bb2:
; ALL-NEXT: br label [[EXIT]]
; ALL: exit:
-; ALL-NEXT: [[PTR_TYPED_IN:%.*]] = getelementptr inbounds i8, i8* [[OBJ0]], i64 32
-; ALL-NEXT: [[PTR_TYPED:%.*]] = bitcast i8* [[PTR_TYPED_IN]] to i32*
-; ALL-NEXT: [[RES_PHI:%.*]] = load i32, i32* [[PTR_TYPED]], align 4
-; ALL-NEXT: store i32 1, i32* [[PTR_TYPED]], align 4
+; ALL-NEXT: [[PTR_TYPED_IN:%.*]] = getelementptr inbounds i8, ptr [[OBJ0]], i64 32
+; ALL-NEXT: [[RES_PHI:%.*]] = load i32, ptr [[PTR_TYPED_IN]], align 4
+; ALL-NEXT: store i32 1, ptr [[PTR_TYPED_IN]], align 4
; ALL-NEXT: [[RES:%.*]] = select i1 [[COND2:%.*]], i32 [[RES_PHI]], i32 1
; ALL-NEXT: ret i32 [[RES]]
;
entry:
- %obj0 = call i8* @get_ptr.i8()
- %obj = getelementptr inbounds i8, i8* %obj0, i64 16
+ %obj0 = call ptr @get_ptr.i8()
+ %obj = getelementptr inbounds i8, ptr %obj0, i64 16
br i1 %cond, label %bb1, label %bb2
bb1:
- %ptr1 = getelementptr inbounds i8, i8* %obj, i64 16
- %ptr1.typed = bitcast i8* %ptr1 to i32*
- %res1 = load i32, i32* %ptr1.typed
+ %ptr1 = getelementptr inbounds i8, ptr %obj, i64 16
+ %res1 = load i32, ptr %ptr1
br label %exit
bb2:
- %ptr2 = getelementptr inbounds i8, i8* %obj, i64 16
- %ptr2.typed = bitcast i8* %ptr2 to i32*
- %res2 = load i32, i32* %ptr2.typed
+ %ptr2 = getelementptr inbounds i8, ptr %obj, i64 16
+ %res2 = load i32, ptr %ptr2
br label %exit
exit:
- %ptr.typed = phi i32* [ %ptr1.typed, %bb1 ], [ %ptr2.typed, %bb2 ]
+ %ptr.typed = phi ptr [ %ptr1, %bb1 ], [ %ptr2, %bb2 ]
%res.phi = phi i32 [ %res1, %bb1 ], [ %res2, %bb2 ]
- store i32 1, i32* %ptr.typed
- %res.load = load i32, i32* %ptr.typed
+ store i32 1, ptr %ptr.typed
+ %res.load = load i32, ptr %ptr.typed
%res = select i1 %cond2, i32 %res.phi, i32 %res.load
ret i32 %res
}
define i32 @test_gep_and_bitcast_same_bb(i1 %cond, i1 %cond2) {
; ALL-LABEL: @test_gep_and_bitcast_same_bb(
; ALL-NEXT: entry:
-; ALL-NEXT: [[OBJ:%.*]] = call i8* @get_ptr.i8()
+; ALL-NEXT: [[OBJ:%.*]] = call ptr @get_ptr.i8()
; ALL-NEXT: br i1 [[COND:%.*]], label [[EXIT:%.*]], label [[BB2:%.*]]
; ALL: bb2:
; ALL-NEXT: br label [[EXIT]]
; ALL: exit:
-; ALL-NEXT: [[PTR_TYPED_IN:%.*]] = getelementptr inbounds i8, i8* [[OBJ]], i64 16
-; ALL-NEXT: [[PTR_TYPED:%.*]] = bitcast i8* [[PTR_TYPED_IN]] to i32*
-; ALL-NEXT: [[RES_PHI:%.*]] = load i32, i32* [[PTR_TYPED]], align 4
-; ALL-NEXT: store i32 1, i32* [[PTR_TYPED]], align 4
+; ALL-NEXT: [[PTR_TYPED_IN:%.*]] = getelementptr inbounds i8, ptr [[OBJ]], i64 16
+; ALL-NEXT: [[RES_PHI:%.*]] = load i32, ptr [[PTR_TYPED_IN]], align 4
+; ALL-NEXT: store i32 1, ptr [[PTR_TYPED_IN]], align 4
; ALL-NEXT: [[RES:%.*]] = select i1 [[COND2:%.*]], i32 [[RES_PHI]], i32 1
; ALL-NEXT: ret i32 [[RES]]
;
entry:
- %obj = call i8* @get_ptr.i8()
- %ptr1 = getelementptr inbounds i8, i8* %obj, i64 16
- %ptr1.typed = bitcast i8* %ptr1 to i32*
- %res1 = load i32, i32* %ptr1.typed
+ %obj = call ptr @get_ptr.i8()
+ %ptr1 = getelementptr inbounds i8, ptr %obj, i64 16
+ %res1 = load i32, ptr %ptr1
br i1 %cond, label %exit, label %bb2
bb2:
- %ptr2 = getelementptr inbounds i8, i8* %obj, i64 16
- %ptr2.typed = bitcast i8* %ptr2 to i32*
- %res2 = load i32, i32* %ptr2.typed
+ %ptr2 = getelementptr inbounds i8, ptr %obj, i64 16
+ %res2 = load i32, ptr %ptr2
br label %exit
exit:
- %ptr.typed = phi i32* [ %ptr1.typed, %entry ], [ %ptr2.typed, %bb2 ]
+ %ptr.typed = phi ptr [ %ptr1, %entry ], [ %ptr2, %bb2 ]
%res.phi = phi i32 [ %res1, %entry ], [ %res2, %bb2 ]
- store i32 1, i32* %ptr.typed
- %res.load = load i32, i32* %ptr.typed
+ store i32 1, ptr %ptr.typed
+ %res.load = load i32, ptr %ptr.typed
%res = select i1 %cond2, i32 %res.phi, i32 %res.load
ret i32 %res
}
define i32 @test_gep_and_bitcast_same_bb_and_extra_use(i1 %cond, i1 %cond2) {
; INSTCOMBINE-LABEL: @test_gep_and_bitcast_same_bb_and_extra_use(
; INSTCOMBINE-NEXT: entry:
-; INSTCOMBINE-NEXT: [[OBJ:%.*]] = call i8* @get_ptr.i8()
-; INSTCOMBINE-NEXT: [[PTR1:%.*]] = getelementptr inbounds i8, i8* [[OBJ]], i64 16
-; INSTCOMBINE-NEXT: [[PTR1_TYPED:%.*]] = bitcast i8* [[PTR1]] to i32*
-; INSTCOMBINE-NEXT: call void @foo.i32(i32* nonnull [[PTR1_TYPED]])
+; INSTCOMBINE-NEXT: [[OBJ:%.*]] = call ptr @get_ptr.i8()
+; INSTCOMBINE-NEXT: [[PTR1:%.*]] = getelementptr inbounds i8, ptr [[OBJ]], i64 16
+; INSTCOMBINE-NEXT: call void @foo.i32(ptr nonnull [[PTR1]])
; INSTCOMBINE-NEXT: br i1 [[COND:%.*]], label [[EXIT:%.*]], label [[BB2:%.*]]
; INSTCOMBINE: bb2:
-; INSTCOMBINE-NEXT: [[PTR2:%.*]] = getelementptr inbounds i8, i8* [[OBJ]], i64 16
-; INSTCOMBINE-NEXT: [[PTR2_TYPED:%.*]] = bitcast i8* [[PTR2]] to i32*
+; INSTCOMBINE-NEXT: [[PTR2:%.*]] = getelementptr inbounds i8, ptr [[OBJ]], i64 16
; INSTCOMBINE-NEXT: br label [[EXIT]]
; INSTCOMBINE: exit:
-; INSTCOMBINE-NEXT: [[PTR_TYPED:%.*]] = phi i32* [ [[PTR1_TYPED]], [[ENTRY:%.*]] ], [ [[PTR2_TYPED]], [[BB2]] ]
-; INSTCOMBINE-NEXT: [[RES_PHI:%.*]] = load i32, i32* [[PTR_TYPED]], align 4
-; INSTCOMBINE-NEXT: store i32 1, i32* [[PTR_TYPED]], align 4
+; INSTCOMBINE-NEXT: [[PTR_TYPED:%.*]] = phi ptr [ [[PTR1]], [[ENTRY:%.*]] ], [ [[PTR2]], [[BB2]] ]
+; INSTCOMBINE-NEXT: [[RES_PHI:%.*]] = load i32, ptr [[PTR_TYPED]], align 4
+; INSTCOMBINE-NEXT: store i32 1, ptr [[PTR_TYPED]], align 4
; INSTCOMBINE-NEXT: [[RES:%.*]] = select i1 [[COND2:%.*]], i32 [[RES_PHI]], i32 1
; INSTCOMBINE-NEXT: ret i32 [[RES]]
;
; INSTCOMBINEGVN-LABEL: @test_gep_and_bitcast_same_bb_and_extra_use(
; INSTCOMBINEGVN-NEXT: entry:
-; INSTCOMBINEGVN-NEXT: [[OBJ:%.*]] = call i8* @get_ptr.i8()
-; INSTCOMBINEGVN-NEXT: [[PTR1:%.*]] = getelementptr inbounds i8, i8* [[OBJ]], i64 16
-; INSTCOMBINEGVN-NEXT: [[PTR1_TYPED:%.*]] = bitcast i8* [[PTR1]] to i32*
-; INSTCOMBINEGVN-NEXT: call void @foo.i32(i32* nonnull [[PTR1_TYPED]])
+; INSTCOMBINEGVN-NEXT: [[OBJ:%.*]] = call ptr @get_ptr.i8()
+; INSTCOMBINEGVN-NEXT: [[PTR1:%.*]] = getelementptr inbounds i8, ptr [[OBJ]], i64 16
+; INSTCOMBINEGVN-NEXT: call void @foo.i32(ptr nonnull [[PTR1]])
; INSTCOMBINEGVN-NEXT: br i1 [[COND:%.*]], label [[EXIT:%.*]], label [[BB2:%.*]]
; INSTCOMBINEGVN: bb2:
; INSTCOMBINEGVN-NEXT: br label [[EXIT]]
; INSTCOMBINEGVN: exit:
-; INSTCOMBINEGVN-NEXT: [[RES_PHI:%.*]] = load i32, i32* [[PTR1_TYPED]], align 4
-; INSTCOMBINEGVN-NEXT: store i32 1, i32* [[PTR1_TYPED]], align 4
+; INSTCOMBINEGVN-NEXT: [[RES_PHI:%.*]] = load i32, ptr [[PTR1]], align 4
+; INSTCOMBINEGVN-NEXT: store i32 1, ptr [[PTR1]], align 4
; INSTCOMBINEGVN-NEXT: [[RES:%.*]] = select i1 [[COND2:%.*]], i32 [[RES_PHI]], i32 1
; INSTCOMBINEGVN-NEXT: ret i32 [[RES]]
;
entry:
- %obj = call i8* @get_ptr.i8()
- %ptr1 = getelementptr inbounds i8, i8* %obj, i64 16
- %ptr1.typed = bitcast i8* %ptr1 to i32*
- call void @foo.i32(i32* %ptr1.typed)
- %res1 = load i32, i32* %ptr1.typed
+ %obj = call ptr @get_ptr.i8()
+ %ptr1 = getelementptr inbounds i8, ptr %obj, i64 16
+ call void @foo.i32(ptr %ptr1)
+ %res1 = load i32, ptr %ptr1
br i1 %cond, label %exit, label %bb2
bb2:
- %ptr2 = getelementptr inbounds i8, i8* %obj, i64 16
- %ptr2.typed = bitcast i8* %ptr2 to i32*
- %res2 = load i32, i32* %ptr2.typed
+ %ptr2 = getelementptr inbounds i8, ptr %obj, i64 16
+ %res2 = load i32, ptr %ptr2
br label %exit
exit:
- %ptr.typed = phi i32* [ %ptr1.typed, %entry ], [ %ptr2.typed, %bb2 ]
+ %ptr.typed = phi ptr [ %ptr1, %entry ], [ %ptr2, %bb2 ]
%res.phi = phi i32 [ %res1, %entry ], [ %res2, %bb2 ]
- store i32 1, i32* %ptr.typed
- %res.load = load i32, i32* %ptr.typed
+ store i32 1, ptr %ptr.typed
+ %res.load = load i32, ptr %ptr.typed
%res = select i1 %cond2, i32 %res.phi, i32 %res.load
ret i32 %res
}
define i8 @test_gep(i1 %cond, i1 %cond2) {
; ALL-LABEL: @test_gep(
; ALL-NEXT: entry:
-; ALL-NEXT: [[OBJ:%.*]] = call i8* @get_ptr.i8()
+; ALL-NEXT: [[OBJ:%.*]] = call ptr @get_ptr.i8()
; ALL-NEXT: br i1 [[COND:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; ALL: bb1:
; ALL-NEXT: br label [[EXIT:%.*]]
; ALL: bb2:
; ALL-NEXT: br label [[EXIT]]
; ALL: exit:
-; ALL-NEXT: [[PTR_TYPED:%.*]] = getelementptr inbounds i8, i8* [[OBJ]], i64 16
-; ALL-NEXT: [[RES_PHI:%.*]] = load i8, i8* [[PTR_TYPED]], align 1
-; ALL-NEXT: store i8 1, i8* [[PTR_TYPED]], align 1
+; ALL-NEXT: [[PTR_TYPED:%.*]] = getelementptr inbounds i8, ptr [[OBJ]], i64 16
+; ALL-NEXT: [[RES_PHI:%.*]] = load i8, ptr [[PTR_TYPED]], align 1
+; ALL-NEXT: store i8 1, ptr [[PTR_TYPED]], align 1
; ALL-NEXT: [[RES:%.*]] = select i1 [[COND2:%.*]], i8 [[RES_PHI]], i8 1
; ALL-NEXT: ret i8 [[RES]]
;
entry:
- %obj = call i8* @get_ptr.i8()
+ %obj = call ptr @get_ptr.i8()
br i1 %cond, label %bb1, label %bb2
bb1:
- %ptr1 = getelementptr inbounds i8, i8* %obj, i64 16
- %res1 = load i8, i8* %ptr1
+ %ptr1 = getelementptr inbounds i8, ptr %obj, i64 16
+ %res1 = load i8, ptr %ptr1
br label %exit
bb2:
- %ptr2 = getelementptr inbounds i8, i8* %obj, i64 16
- %res2 = load i8, i8* %ptr2
+ %ptr2 = getelementptr inbounds i8, ptr %obj, i64 16
+ %res2 = load i8, ptr %ptr2
br label %exit
exit:
- %ptr.typed = phi i8* [ %ptr1, %bb1 ], [ %ptr2, %bb2 ]
+ %ptr.typed = phi ptr [ %ptr1, %bb1 ], [ %ptr2, %bb2 ]
%res.phi = phi i8 [ %res1, %bb1 ], [ %res2, %bb2 ]
- store i8 1, i8* %ptr.typed
- %res.load = load i8, i8* %ptr.typed
+ store i8 1, ptr %ptr.typed
+ %res.load = load i8, ptr %ptr.typed
%res = select i1 %cond2, i8 %res.phi, i8 %res.load
ret i8 %res
}
define i32 @test_extra_uses(i1 %cond, i1 %cond2) {
; ALL-LABEL: @test_extra_uses(
; ALL-NEXT: entry:
-; ALL-NEXT: [[OBJ:%.*]] = call i8* @get_ptr.i8()
+; ALL-NEXT: [[OBJ:%.*]] = call ptr @get_ptr.i8()
; ALL-NEXT: br i1 [[COND:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; ALL: bb1:
-; ALL-NEXT: [[PTR1:%.*]] = getelementptr inbounds i8, i8* [[OBJ]], i64 16
-; ALL-NEXT: [[PTR1_TYPED:%.*]] = bitcast i8* [[PTR1]] to i32*
-; ALL-NEXT: [[RES1:%.*]] = load i32, i32* [[PTR1_TYPED]], align 4
-; ALL-NEXT: call void @foo.i32(i32* nonnull [[PTR1_TYPED]])
+; ALL-NEXT: [[PTR1:%.*]] = getelementptr inbounds i8, ptr [[OBJ]], i64 16
+; ALL-NEXT: [[RES1:%.*]] = load i32, ptr [[PTR1]], align 4
+; ALL-NEXT: call void @foo.i32(ptr nonnull [[PTR1]])
; ALL-NEXT: br label [[EXIT:%.*]]
; ALL: bb2:
-; ALL-NEXT: [[PTR2:%.*]] = getelementptr inbounds i8, i8* [[OBJ]], i64 16
-; ALL-NEXT: [[PTR2_TYPED:%.*]] = bitcast i8* [[PTR2]] to i32*
-; ALL-NEXT: [[RES2:%.*]] = load i32, i32* [[PTR2_TYPED]], align 4
-; ALL-NEXT: call void @foo.i32(i32* nonnull [[PTR2_TYPED]])
+; ALL-NEXT: [[PTR2:%.*]] = getelementptr inbounds i8, ptr [[OBJ]], i64 16
+; ALL-NEXT: [[RES2:%.*]] = load i32, ptr [[PTR2]], align 4
+; ALL-NEXT: call void @foo.i32(ptr nonnull [[PTR2]])
; ALL-NEXT: br label [[EXIT]]
; ALL: exit:
-; ALL-NEXT: [[PTR_TYPED:%.*]] = phi i32* [ [[PTR1_TYPED]], [[BB1]] ], [ [[PTR2_TYPED]], [[BB2]] ]
+; ALL-NEXT: [[PTR_TYPED:%.*]] = phi ptr [ [[PTR1]], [[BB1]] ], [ [[PTR2]], [[BB2]] ]
; ALL-NEXT: [[RES_PHI:%.*]] = phi i32 [ [[RES1]], [[BB1]] ], [ [[RES2]], [[BB2]] ]
-; ALL-NEXT: store i32 1, i32* [[PTR_TYPED]], align 4
+; ALL-NEXT: store i32 1, ptr [[PTR_TYPED]], align 4
; ALL-NEXT: [[RES:%.*]] = select i1 [[COND2:%.*]], i32 [[RES_PHI]], i32 1
; ALL-NEXT: ret i32 [[RES]]
;
entry:
- %obj = call i8* @get_ptr.i8()
+ %obj = call ptr @get_ptr.i8()
br i1 %cond, label %bb1, label %bb2
bb1:
- %ptr1 = getelementptr inbounds i8, i8* %obj, i64 16
- %ptr1.typed = bitcast i8* %ptr1 to i32*
- %res1 = load i32, i32* %ptr1.typed
- call void @foo.i32(i32* %ptr1.typed)
+ %ptr1 = getelementptr inbounds i8, ptr %obj, i64 16
+ %res1 = load i32, ptr %ptr1
+ call void @foo.i32(ptr %ptr1)
br label %exit
bb2:
- %ptr2 = getelementptr inbounds i8, i8* %obj, i64 16
- %ptr2.typed = bitcast i8* %ptr2 to i32*
- %res2 = load i32, i32* %ptr2.typed
- call void @foo.i32(i32* %ptr2.typed)
+ %ptr2 = getelementptr inbounds i8, ptr %obj, i64 16
+ %res2 = load i32, ptr %ptr2
+ call void @foo.i32(ptr %ptr2)
br label %exit
exit:
- %ptr.typed = phi i32* [ %ptr1.typed, %bb1 ], [ %ptr2.typed, %bb2 ]
+ %ptr.typed = phi ptr [ %ptr1, %bb1 ], [ %ptr2, %bb2 ]
%res.phi = phi i32 [ %res1, %bb1 ], [ %res2, %bb2 ]
- store i32 1, i32* %ptr.typed
- %res.load = load i32, i32* %ptr.typed
+ store i32 1, ptr %ptr.typed
+ %res.load = load i32, ptr %ptr.typed
%res = select i1 %cond2, i32 %res.phi, i32 %res.load
ret i32 %res
}
define i32 @test_extra_uses_non_inbounds(i1 %cond, i1 %cond2) {
; ALL-LABEL: @test_extra_uses_non_inbounds(
; ALL-NEXT: entry:
-; ALL-NEXT: [[OBJ:%.*]] = call i8* @get_ptr.i8()
+; ALL-NEXT: [[OBJ:%.*]] = call ptr @get_ptr.i8()
; ALL-NEXT: br i1 [[COND:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; ALL: bb1:
-; ALL-NEXT: [[PTR1:%.*]] = getelementptr i8, i8* [[OBJ]], i64 16
-; ALL-NEXT: [[PTR1_TYPED:%.*]] = bitcast i8* [[PTR1]] to i32*
-; ALL-NEXT: [[RES1:%.*]] = load i32, i32* [[PTR1_TYPED]], align 4
-; ALL-NEXT: call void @foo.i32(i32* nonnull [[PTR1_TYPED]])
+; ALL-NEXT: [[PTR1:%.*]] = getelementptr i8, ptr [[OBJ]], i64 16
+; ALL-NEXT: [[RES1:%.*]] = load i32, ptr [[PTR1]], align 4
+; ALL-NEXT: call void @foo.i32(ptr nonnull [[PTR1]])
; ALL-NEXT: br label [[EXIT:%.*]]
; ALL: bb2:
-; ALL-NEXT: [[PTR2:%.*]] = getelementptr i8, i8* [[OBJ]], i64 16
-; ALL-NEXT: [[PTR2_TYPED:%.*]] = bitcast i8* [[PTR2]] to i32*
-; ALL-NEXT: [[RES2:%.*]] = load i32, i32* [[PTR2_TYPED]], align 4
-; ALL-NEXT: call void @foo.i32(i32* nonnull [[PTR2_TYPED]])
+; ALL-NEXT: [[PTR2:%.*]] = getelementptr i8, ptr [[OBJ]], i64 16
+; ALL-NEXT: [[RES2:%.*]] = load i32, ptr [[PTR2]], align 4
+; ALL-NEXT: call void @foo.i32(ptr nonnull [[PTR2]])
; ALL-NEXT: br label [[EXIT]]
; ALL: exit:
-; ALL-NEXT: [[PTR_TYPED:%.*]] = phi i32* [ [[PTR1_TYPED]], [[BB1]] ], [ [[PTR2_TYPED]], [[BB2]] ]
+; ALL-NEXT: [[PTR_TYPED:%.*]] = phi ptr [ [[PTR1]], [[BB1]] ], [ [[PTR2]], [[BB2]] ]
; ALL-NEXT: [[RES_PHI:%.*]] = phi i32 [ [[RES1]], [[BB1]] ], [ [[RES2]], [[BB2]] ]
-; ALL-NEXT: store i32 1, i32* [[PTR_TYPED]], align 4
+; ALL-NEXT: store i32 1, ptr [[PTR_TYPED]], align 4
; ALL-NEXT: [[RES:%.*]] = select i1 [[COND2:%.*]], i32 [[RES_PHI]], i32 1
; ALL-NEXT: ret i32 [[RES]]
;
entry:
- %obj = call i8* @get_ptr.i8()
+ %obj = call ptr @get_ptr.i8()
br i1 %cond, label %bb1, label %bb2
bb1:
- %ptr1 = getelementptr i8, i8* %obj, i64 16
- %ptr1.typed = bitcast i8* %ptr1 to i32*
- %res1 = load i32, i32* %ptr1.typed
- call void @foo.i32(i32* %ptr1.typed)
+ %ptr1 = getelementptr i8, ptr %obj, i64 16
+ %res1 = load i32, ptr %ptr1
+ call void @foo.i32(ptr %ptr1)
br label %exit
bb2:
- %ptr2 = getelementptr i8, i8* %obj, i64 16
- %ptr2.typed = bitcast i8* %ptr2 to i32*
- %res2 = load i32, i32* %ptr2.typed
- call void @foo.i32(i32* %ptr2.typed)
+ %ptr2 = getelementptr i8, ptr %obj, i64 16
+ %res2 = load i32, ptr %ptr2
+ call void @foo.i32(ptr %ptr2)
br label %exit
exit:
- %ptr.typed = phi i32* [ %ptr1.typed, %bb1 ], [ %ptr2.typed, %bb2 ]
+ %ptr.typed = phi ptr [ %ptr1, %bb1 ], [ %ptr2, %bb2 ]
%res.phi = phi i32 [ %res1, %bb1 ], [ %res2, %bb2 ]
- store i32 1, i32* %ptr.typed
- %res.load = load i32, i32* %ptr.typed
+ store i32 1, ptr %ptr.typed
+ %res.load = load i32, ptr %ptr.typed
%res = select i1 %cond2, i32 %res.phi, i32 %res.load
ret i32 %res
}
define i32 @test_extra_uses_multiple_geps(i1 %cond, i1 %cond2) {
; ALL-LABEL: @test_extra_uses_multiple_geps(
; ALL-NEXT: entry:
-; ALL-NEXT: [[OBJ:%.*]] = call i8* @get_ptr.i8()
+; ALL-NEXT: [[OBJ:%.*]] = call ptr @get_ptr.i8()
; ALL-NEXT: br i1 [[COND:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; ALL: bb1:
-; ALL-NEXT: [[PTR1:%.*]] = getelementptr inbounds i8, i8* [[OBJ]], i64 16
-; ALL-NEXT: [[PTR1_TYPED:%.*]] = bitcast i8* [[PTR1]] to i32*
-; ALL-NEXT: [[RES1:%.*]] = load i32, i32* [[PTR1_TYPED]], align 4
-; ALL-NEXT: call void @foo.i32(i32* nonnull [[PTR1_TYPED]])
+; ALL-NEXT: [[PTR1:%.*]] = getelementptr inbounds i8, ptr [[OBJ]], i64 16
+; ALL-NEXT: [[RES1:%.*]] = load i32, ptr [[PTR1]], align 4
+; ALL-NEXT: call void @foo.i32(ptr nonnull [[PTR1]])
; ALL-NEXT: br label [[EXIT:%.*]]
; ALL: bb2:
-; ALL-NEXT: [[PTR2_1:%.*]] = getelementptr i8, i8* [[OBJ]], i64 16
-; ALL-NEXT: [[PTR2_TYPED:%.*]] = bitcast i8* [[PTR2_1]] to i32*
-; ALL-NEXT: [[RES2:%.*]] = load i32, i32* [[PTR2_TYPED]], align 4
-; ALL-NEXT: call void @foo.i32(i32* nonnull [[PTR2_TYPED]])
+; ALL-NEXT: [[PTR2_1:%.*]] = getelementptr i8, ptr [[OBJ]], i64 16
+; ALL-NEXT: [[RES2:%.*]] = load i32, ptr [[PTR2_1]], align 4
+; ALL-NEXT: call void @foo.i32(ptr nonnull [[PTR2_1]])
; ALL-NEXT: br label [[EXIT]]
; ALL: exit:
-; ALL-NEXT: [[PTR_TYPED:%.*]] = phi i32* [ [[PTR1_TYPED]], [[BB1]] ], [ [[PTR2_TYPED]], [[BB2]] ]
+; ALL-NEXT: [[PTR_TYPED:%.*]] = phi ptr [ [[PTR1]], [[BB1]] ], [ [[PTR2_1]], [[BB2]] ]
; ALL-NEXT: [[RES_PHI:%.*]] = phi i32 [ [[RES1]], [[BB1]] ], [ [[RES2]], [[BB2]] ]
-; ALL-NEXT: store i32 1, i32* [[PTR_TYPED]], align 4
+; ALL-NEXT: store i32 1, ptr [[PTR_TYPED]], align 4
; ALL-NEXT: [[RES:%.*]] = select i1 [[COND2:%.*]], i32 [[RES_PHI]], i32 1
; ALL-NEXT: ret i32 [[RES]]
;
entry:
- %obj = call i8* @get_ptr.i8()
+ %obj = call ptr @get_ptr.i8()
br i1 %cond, label %bb1, label %bb2
bb1:
- %ptr1 = getelementptr inbounds i8, i8* %obj, i64 16
- %ptr1.typed = bitcast i8* %ptr1 to i32*
- %res1 = load i32, i32* %ptr1.typed
- call void @foo.i32(i32* %ptr1.typed)
+ %ptr1 = getelementptr inbounds i8, ptr %obj, i64 16
+ %res1 = load i32, ptr %ptr1
+ call void @foo.i32(ptr %ptr1)
br label %exit
bb2:
- %ptr2.0 = getelementptr i8, i8* %obj, i64 8
- %ptr2.1 = getelementptr inbounds i8, i8* %ptr2.0, i64 8
- %ptr2.typed = bitcast i8* %ptr2.1 to i32*
- %res2 = load i32, i32* %ptr2.typed
- call void @foo.i32(i32* %ptr2.typed)
+ %ptr2.0 = getelementptr i8, ptr %obj, i64 8
+ %ptr2.1 = getelementptr inbounds i8, ptr %ptr2.0, i64 8
+ %res2 = load i32, ptr %ptr2.1
+ call void @foo.i32(ptr %ptr2.1)
br label %exit
exit:
- %ptr.typed = phi i32* [ %ptr1.typed, %bb1 ], [ %ptr2.typed, %bb2 ]
+ %ptr.typed = phi ptr [ %ptr1, %bb1 ], [ %ptr2.1, %bb2 ]
%res.phi = phi i32 [ %res1, %bb1 ], [ %res2, %bb2 ]
- store i32 1, i32* %ptr.typed
- %res.load = load i32, i32* %ptr.typed
+ store i32 1, ptr %ptr.typed
+ %res.load = load i32, ptr %ptr.typed
%res = select i1 %cond2, i32 %res.phi, i32 %res.load
ret i32 %res
}
define i8 @test_gep_extra_uses(i1 %cond, i1 %cond2) {
; ALL-LABEL: @test_gep_extra_uses(
; ALL-NEXT: entry:
-; ALL-NEXT: [[OBJ:%.*]] = call i8* @get_ptr.i8()
+; ALL-NEXT: [[OBJ:%.*]] = call ptr @get_ptr.i8()
; ALL-NEXT: br i1 [[COND:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; ALL: bb1:
-; ALL-NEXT: [[PTR1:%.*]] = getelementptr inbounds i8, i8* [[OBJ]], i64 16
-; ALL-NEXT: [[RES1:%.*]] = load i8, i8* [[PTR1]], align 1
-; ALL-NEXT: call void @foo.i8(i8* nonnull [[PTR1]])
+; ALL-NEXT: [[PTR1:%.*]] = getelementptr inbounds i8, ptr [[OBJ]], i64 16
+; ALL-NEXT: [[RES1:%.*]] = load i8, ptr [[PTR1]], align 1
+; ALL-NEXT: call void @foo.i8(ptr nonnull [[PTR1]])
; ALL-NEXT: br label [[EXIT:%.*]]
; ALL: bb2:
-; ALL-NEXT: [[PTR2:%.*]] = getelementptr inbounds i8, i8* [[OBJ]], i64 16
-; ALL-NEXT: [[RES2:%.*]] = load i8, i8* [[PTR2]], align 1
-; ALL-NEXT: call void @foo.i8(i8* nonnull [[PTR2]])
+; ALL-NEXT: [[PTR2:%.*]] = getelementptr inbounds i8, ptr [[OBJ]], i64 16
+; ALL-NEXT: [[RES2:%.*]] = load i8, ptr [[PTR2]], align 1
+; ALL-NEXT: call void @foo.i8(ptr nonnull [[PTR2]])
; ALL-NEXT: br label [[EXIT]]
; ALL: exit:
-; ALL-NEXT: [[PTR_TYPED:%.*]] = phi i8* [ [[PTR1]], [[BB1]] ], [ [[PTR2]], [[BB2]] ]
+; ALL-NEXT: [[PTR_TYPED:%.*]] = phi ptr [ [[PTR1]], [[BB1]] ], [ [[PTR2]], [[BB2]] ]
; ALL-NEXT: [[RES_PHI:%.*]] = phi i8 [ [[RES1]], [[BB1]] ], [ [[RES2]], [[BB2]] ]
-; ALL-NEXT: store i8 1, i8* [[PTR_TYPED]], align 1
+; ALL-NEXT: store i8 1, ptr [[PTR_TYPED]], align 1
; ALL-NEXT: [[RES:%.*]] = select i1 [[COND2:%.*]], i8 [[RES_PHI]], i8 1
; ALL-NEXT: ret i8 [[RES]]
;
entry:
- %obj = call i8* @get_ptr.i8()
+ %obj = call ptr @get_ptr.i8()
br i1 %cond, label %bb1, label %bb2
bb1:
- %ptr1 = getelementptr inbounds i8, i8* %obj, i64 16
- %res1 = load i8, i8* %ptr1
- call void @foo.i8(i8* %ptr1)
+ %ptr1 = getelementptr inbounds i8, ptr %obj, i64 16
+ %res1 = load i8, ptr %ptr1
+ call void @foo.i8(ptr %ptr1)
br label %exit
bb2:
- %ptr2 = getelementptr inbounds i8, i8* %obj, i64 16
- %res2 = load i8, i8* %ptr2
- call void @foo.i8(i8* %ptr2)
+ %ptr2 = getelementptr inbounds i8, ptr %obj, i64 16
+ %res2 = load i8, ptr %ptr2
+ call void @foo.i8(ptr %ptr2)
br label %exit
exit:
- %ptr.typed = phi i8* [ %ptr1, %bb1 ], [ %ptr2, %bb2 ]
+ %ptr.typed = phi ptr [ %ptr1, %bb1 ], [ %ptr2, %bb2 ]
%res.phi = phi i8 [ %res1, %bb1 ], [ %res2, %bb2 ]
- store i8 1, i8* %ptr.typed
- %res.load = load i8, i8* %ptr.typed
+ store i8 1, ptr %ptr.typed
+ %res.load = load i8, ptr %ptr.typed
%res = select i1 %cond2, i8 %res.phi, i8 %res.load
ret i8 %res
}
; `swifterror` addresses are restricted to loads, stores, and call arguments.
-declare void @takeAddress(i8** swifterror)
+declare void @takeAddress(ptr swifterror)
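; The phi fold must therefore not fire here: merging the two loads would
; require a phi of the alloca addresses, e.g.
;   %a = phi ptr [ %obj, %bb1 ], [ %obj2, %bb2 ]
; and such a phi would be an illegal use of a swifterror address. Only the
; loaded results may be merged, as the checks below verify.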
-define i8* @test_dont_optimize_swifterror(i1 %cond, i1 %cond2, i8* %ptr) {
+define ptr @test_dont_optimize_swifterror(i1 %cond, i1 %cond2, ptr %ptr) {
; INSTCOMBINE-LABEL: @test_dont_optimize_swifterror(
; INSTCOMBINE-NEXT: entry:
-; INSTCOMBINE-NEXT: [[OBJ:%.*]] = alloca swifterror i8*, align 8
-; INSTCOMBINE-NEXT: [[OBJ2:%.*]] = alloca swifterror i8*, align 8
-; INSTCOMBINE-NEXT: call void @takeAddress(i8** nonnull swifterror [[OBJ]])
-; INSTCOMBINE-NEXT: call void @takeAddress(i8** nonnull swifterror [[OBJ2]])
-; INSTCOMBINE-NEXT: store i8* [[PTR:%.*]], i8** [[OBJ]], align 8
+; INSTCOMBINE-NEXT: [[OBJ:%.*]] = alloca swifterror ptr, align 8
+; INSTCOMBINE-NEXT: [[OBJ2:%.*]] = alloca swifterror ptr, align 8
+; INSTCOMBINE-NEXT: call void @takeAddress(ptr nonnull swifterror [[OBJ]])
+; INSTCOMBINE-NEXT: call void @takeAddress(ptr nonnull swifterror [[OBJ2]])
+; INSTCOMBINE-NEXT: store ptr [[PTR:%.*]], ptr [[OBJ]], align 8
; INSTCOMBINE-NEXT: br i1 [[COND:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; INSTCOMBINE: bb1:
-; INSTCOMBINE-NEXT: [[RES1:%.*]] = load i8*, i8** [[OBJ]], align 8
+; INSTCOMBINE-NEXT: [[RES1:%.*]] = load ptr, ptr [[OBJ]], align 8
; INSTCOMBINE-NEXT: br label [[EXIT:%.*]]
; INSTCOMBINE: bb2:
-; INSTCOMBINE-NEXT: [[RES2:%.*]] = load i8*, i8** [[OBJ2]], align 8
+; INSTCOMBINE-NEXT: [[RES2:%.*]] = load ptr, ptr [[OBJ2]], align 8
; INSTCOMBINE-NEXT: br label [[EXIT]]
; INSTCOMBINE: exit:
-; INSTCOMBINE-NEXT: [[RES_PHI:%.*]] = phi i8* [ [[RES1]], [[BB1]] ], [ [[RES2]], [[BB2]] ]
-; INSTCOMBINE-NEXT: store i8* null, i8** [[OBJ]], align 8
-; INSTCOMBINE-NEXT: [[RES:%.*]] = select i1 [[COND2:%.*]], i8* [[RES_PHI]], i8* null
-; INSTCOMBINE-NEXT: ret i8* [[RES]]
+; INSTCOMBINE-NEXT: [[RES_PHI:%.*]] = phi ptr [ [[RES1]], [[BB1]] ], [ [[RES2]], [[BB2]] ]
+; INSTCOMBINE-NEXT: store ptr null, ptr [[OBJ]], align 8
+; INSTCOMBINE-NEXT: [[RES:%.*]] = select i1 [[COND2:%.*]], ptr [[RES_PHI]], ptr null
+; INSTCOMBINE-NEXT: ret ptr [[RES]]
;
; INSTCOMBINEGVN-LABEL: @test_dont_optimize_swifterror(
; INSTCOMBINEGVN-NEXT: entry:
-; INSTCOMBINEGVN-NEXT: [[OBJ:%.*]] = alloca swifterror i8*, align 8
-; INSTCOMBINEGVN-NEXT: [[OBJ2:%.*]] = alloca swifterror i8*, align 8
-; INSTCOMBINEGVN-NEXT: call void @takeAddress(i8** nonnull swifterror [[OBJ]])
-; INSTCOMBINEGVN-NEXT: call void @takeAddress(i8** nonnull swifterror [[OBJ2]])
-; INSTCOMBINEGVN-NEXT: store i8* [[PTR:%.*]], i8** [[OBJ]], align 8
+; INSTCOMBINEGVN-NEXT: [[OBJ:%.*]] = alloca swifterror ptr, align 8
+; INSTCOMBINEGVN-NEXT: [[OBJ2:%.*]] = alloca swifterror ptr, align 8
+; INSTCOMBINEGVN-NEXT: call void @takeAddress(ptr nonnull swifterror [[OBJ]])
+; INSTCOMBINEGVN-NEXT: call void @takeAddress(ptr nonnull swifterror [[OBJ2]])
+; INSTCOMBINEGVN-NEXT: store ptr [[PTR:%.*]], ptr [[OBJ]], align 8
; INSTCOMBINEGVN-NEXT: br i1 [[COND:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; INSTCOMBINEGVN: bb1:
; INSTCOMBINEGVN-NEXT: br label [[EXIT:%.*]]
; INSTCOMBINEGVN: bb2:
-; INSTCOMBINEGVN-NEXT: [[RES2:%.*]] = load i8*, i8** [[OBJ2]], align 8
+; INSTCOMBINEGVN-NEXT: [[RES2:%.*]] = load ptr, ptr [[OBJ2]], align 8
; INSTCOMBINEGVN-NEXT: br label [[EXIT]]
; INSTCOMBINEGVN: exit:
-; INSTCOMBINEGVN-NEXT: [[RES_PHI:%.*]] = phi i8* [ [[PTR]], [[BB1]] ], [ [[RES2]], [[BB2]] ]
-; INSTCOMBINEGVN-NEXT: store i8* null, i8** [[OBJ]], align 8
-; INSTCOMBINEGVN-NEXT: [[RES:%.*]] = select i1 [[COND2:%.*]], i8* [[RES_PHI]], i8* null
-; INSTCOMBINEGVN-NEXT: ret i8* [[RES]]
+; INSTCOMBINEGVN-NEXT: [[RES_PHI:%.*]] = phi ptr [ [[PTR]], [[BB1]] ], [ [[RES2]], [[BB2]] ]
+; INSTCOMBINEGVN-NEXT: store ptr null, ptr [[OBJ]], align 8
+; INSTCOMBINEGVN-NEXT: [[RES:%.*]] = select i1 [[COND2:%.*]], ptr [[RES_PHI]], ptr null
+; INSTCOMBINEGVN-NEXT: ret ptr [[RES]]
;
entry:
- %obj = alloca swifterror i8*, align 8
- %obj2 = alloca swifterror i8*, align 8
- call void @takeAddress(i8** swifterror %obj)
- call void @takeAddress(i8** swifterror %obj2)
- store i8* %ptr, i8** %obj, align 8
+ %obj = alloca swifterror ptr, align 8
+ %obj2 = alloca swifterror ptr, align 8
+ call void @takeAddress(ptr swifterror %obj)
+ call void @takeAddress(ptr swifterror %obj2)
+ store ptr %ptr, ptr %obj, align 8
br i1 %cond, label %bb1, label %bb2
bb1: ; preds = %entry
- %res1 = load i8*, i8** %obj, align 8
+ %res1 = load ptr, ptr %obj, align 8
br label %exit
bb2: ; preds = %entry
- %res2 = load i8*, i8** %obj2, align 8
+ %res2 = load ptr, ptr %obj2, align 8
br label %exit
exit: ; preds = %bb2, %bb1
- %res.phi = phi i8* [ %res1, %bb1 ], [ %res2, %bb2 ]
- store i8* null, i8** %obj, align 8
- %res = select i1 %cond2, i8* %res.phi, i8* null
- ret i8* %res
+ %res.phi = phi ptr [ %res1, %bb1 ], [ %res2, %bb2 ]
+ store ptr null, ptr %obj, align 8
+ %res = select i1 %cond2, ptr %res.phi, ptr null
+ ret ptr %res
}
; Convert ptrtoint( phi [ inttoptr( ptrtoint(x) ) ] ) ---> ptrtoint( phi [x] )
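; The round-trip can only be elided when the integer type preserves all
; pointer bits and the address spaces agree; the negative tests further
; below cover the cases where it cannot.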
-define i64 @func(i32** %X, i32** %Y, i1 %cond) {
+define i64 @func(ptr %X, ptr %Y, i1 %cond) {
; CHECK-LABEL: @func(
; CHECK-NEXT: br i1 [[COND:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK: bb2:
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: [[PHI_IN_IN:%.*]] = phi i32** [ [[X:%.*]], [[BB1]] ], [ [[Y:%.*]], [[BB2]] ]
-; CHECK-NEXT: [[PHI_IN:%.*]] = ptrtoint i32** [[PHI_IN_IN]] to i64
+; CHECK-NEXT: [[PHI_IN_IN:%.*]] = phi ptr [ [[X:%.*]], [[BB1]] ], [ [[Y:%.*]], [[BB2]] ]
+; CHECK-NEXT: [[PHI_IN:%.*]] = ptrtoint ptr [[PHI_IN_IN]] to i64
; CHECK-NEXT: ret i64 [[PHI_IN]]
;
br i1 %cond, label %bb1, label %bb2
bb1:
- %X.i = ptrtoint i32** %X to i64
- %X.p = inttoptr i64 %X.i to i32*
+ %X.i = ptrtoint ptr %X to i64
+ %X.p = inttoptr i64 %X.i to ptr
br label %exit
bb2:
- %Y.i = ptrtoint i32** %Y to i64
- %Y.p = inttoptr i64 %Y.i to i32*
+ %Y.i = ptrtoint ptr %Y to i64
+ %Y.p = inttoptr i64 %Y.i to ptr
br label %exit
exit:
- %phi = phi i32* [%X.p, %bb1], [%Y.p, %bb2]
- %X.p.i = ptrtoint i32* %phi to i64
+ %phi = phi ptr [%X.p, %bb1], [%Y.p, %bb2]
+ %X.p.i = ptrtoint ptr %phi to i64
ret i64 %X.p.i
}
-define i64 @func_single_operand(i32** %X, i32** %Y, i1 %cond) {
+define i64 @func_single_operand(ptr %X, ptr %Y, i1 %cond) {
; CHECK-LABEL: @func_single_operand(
; CHECK-NEXT: br i1 [[COND:%.*]], label [[BB1:%.*]], label [[EXIT:%.*]]
; CHECK: bb1:
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: [[PHI_IN:%.*]] = phi i32** [ [[X:%.*]], [[BB1]] ], [ [[Y:%.*]], [[TMP0:%.*]] ]
-; CHECK-NEXT: [[X_P_I:%.*]] = ptrtoint i32** [[PHI_IN]] to i64
+; CHECK-NEXT: [[PHI_IN:%.*]] = phi ptr [ [[X:%.*]], [[BB1]] ], [ [[Y:%.*]], [[TMP0:%.*]] ]
+; CHECK-NEXT: [[X_P_I:%.*]] = ptrtoint ptr [[PHI_IN]] to i64
; CHECK-NEXT: ret i64 [[X_P_I]]
;
- %Y.p = bitcast i32** %Y to i32*
br i1 %cond, label %bb1, label %exit
bb1:
- %X.i = ptrtoint i32** %X to i64
- %X.p = inttoptr i64 %X.i to i32*
+ %X.i = ptrtoint ptr %X to i64
+ %X.p = inttoptr i64 %X.i to ptr
br label %exit
exit:
- %phi = phi i32* [%X.p, %bb1], [%Y.p, %0]
- %X.p.i = ptrtoint i32* %phi to i64
+ %phi = phi ptr [%X.p, %bb1], [%Y, %0]
+ %X.p.i = ptrtoint ptr %phi to i64
ret i64 %X.p.i
}
-define i64 @func_pointer_different_types(i16** %X, i32** %Y, i1 %cond) {
+define i64 @func_pointer_different_types(ptr %X, ptr %Y, i1 %cond) {
; CHECK-LABEL: @func_pointer_different_types(
-; CHECK-NEXT: [[Y_P:%.*]] = bitcast i32** [[Y:%.*]] to i32*
; CHECK-NEXT: br i1 [[COND:%.*]], label [[BB1:%.*]], label [[EXIT:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i16** [[X:%.*]] to i32*
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: [[PHI:%.*]] = phi i32* [ [[TMP1]], [[BB1]] ], [ [[Y_P]], [[TMP0:%.*]] ]
-; CHECK-NEXT: [[X_P_I:%.*]] = ptrtoint i32* [[PHI]] to i64
+; CHECK-NEXT: [[PHI:%.*]] = phi ptr [ [[X:%.*]], [[BB1]] ], [ [[Y:%.*]], [[TMP0:%.*]] ]
+; CHECK-NEXT: [[X_P_I:%.*]] = ptrtoint ptr [[PHI]] to i64
; CHECK-NEXT: ret i64 [[X_P_I]]
;
- %Y.p = bitcast i32** %Y to i32*
br i1 %cond, label %bb1, label %exit
bb1:
- %X.i = ptrtoint i16** %X to i64
- %X.p = inttoptr i64 %X.i to i32*
+ %X.i = ptrtoint ptr %X to i64
+ %X.p = inttoptr i64 %X.i to ptr
br label %exit
exit:
- %phi = phi i32* [%X.p, %bb1], [%Y.p, %0]
- %X.p.i = ptrtoint i32* %phi to i64
+ %phi = phi ptr [%X.p, %bb1], [%Y, %0]
+ %X.p.i = ptrtoint ptr %phi to i64
ret i64 %X.p.i
}
; Negative test - wrong integer type
-define i64 @func_integer_type_too_small(i32** %X, i32** %Y, i1 %cond) {
+define i64 @func_integer_type_too_small(ptr %X, ptr %Y, i1 %cond) {
; CHECK-LABEL: @func_integer_type_too_small(
-; CHECK-NEXT: [[Y_P:%.*]] = bitcast i32** [[Y:%.*]] to i32*
; CHECK-NEXT: br i1 [[COND:%.*]], label [[BB1:%.*]], label [[EXIT:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i32** [[X:%.*]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[X:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], 4294967295
-; CHECK-NEXT: [[X_P:%.*]] = inttoptr i64 [[TMP2]] to i32*
+; CHECK-NEXT: [[X_P:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: [[PHI:%.*]] = phi i32* [ [[X_P]], [[BB1]] ], [ [[Y_P]], [[TMP0:%.*]] ]
-; CHECK-NEXT: [[X_P_I:%.*]] = ptrtoint i32* [[PHI]] to i64
+; CHECK-NEXT: [[PHI:%.*]] = phi ptr [ [[X_P]], [[BB1]] ], [ [[Y:%.*]], [[TMP0:%.*]] ]
+; CHECK-NEXT: [[X_P_I:%.*]] = ptrtoint ptr [[PHI]] to i64
; CHECK-NEXT: ret i64 [[X_P_I]]
;
- %Y.p = bitcast i32** %Y to i32*
br i1 %cond, label %bb1, label %exit
bb1:
- %X.i = ptrtoint i32** %X to i32
- %X.p = inttoptr i32 %X.i to i32*
+ %X.i = ptrtoint ptr %X to i32
+ %X.p = inttoptr i32 %X.i to ptr
br label %exit
exit:
- %phi = phi i32* [%X.p, %bb1], [%Y.p, %0]
- %X.p.i = ptrtoint i32* %phi to i64
+ %phi = phi ptr [%X.p, %bb1], [%Y, %0]
+ %X.p.i = ptrtoint ptr %phi to i64
ret i64 %X.p.i
}
; Negative test - phi not used in ptrtoint
-define i32* @func_phi_not_use_in_ptr2int(i32** %X, i32** %Y, i1 %cond) {
+define ptr @func_phi_not_use_in_ptr2int(ptr %X, ptr %Y, i1 %cond) {
; CHECK-LABEL: @func_phi_not_use_in_ptr2int(
-; CHECK-NEXT: [[Y_P:%.*]] = bitcast i32** [[Y:%.*]] to i32*
; CHECK-NEXT: br i1 [[COND:%.*]], label [[BB1:%.*]], label [[EXIT:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i32** [[X:%.*]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[X:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], 4294967295
-; CHECK-NEXT: [[X_P:%.*]] = inttoptr i64 [[TMP2]] to i32*
+; CHECK-NEXT: [[X_P:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: [[PHI:%.*]] = phi i32* [ [[X_P]], [[BB1]] ], [ [[Y_P]], [[TMP0:%.*]] ]
-; CHECK-NEXT: ret i32* [[PHI]]
+; CHECK-NEXT: [[PHI:%.*]] = phi ptr [ [[X_P]], [[BB1]] ], [ [[Y:%.*]], [[TMP0:%.*]] ]
+; CHECK-NEXT: ret ptr [[PHI]]
;
- %Y.p = bitcast i32** %Y to i32*
br i1 %cond, label %bb1, label %exit
bb1:
- %X.i = ptrtoint i32** %X to i32
- %X.p = inttoptr i32 %X.i to i32*
+ %X.i = ptrtoint ptr %X to i32
+ %X.p = inttoptr i32 %X.i to ptr
br label %exit
exit:
- %phi = phi i32* [%X.p, %bb1], [%Y.p, %0]
- ret i32* %phi
+ %phi = phi ptr [%X.p, %bb1], [%Y, %0]
+ ret ptr %phi
}
; Negative test - pointers in different address spaces
-define i64 @func_ptr_different_addrspace(i16 addrspace(2)* %X, i32** %Y, i1 %cond) {
+define i64 @func_ptr_different_addrspace(ptr addrspace(2) %X, ptr %Y, i1 %cond) {
; CHECK-LABEL: @func_ptr_different_addrspace(
-; CHECK-NEXT: [[Y_P:%.*]] = bitcast i32** [[Y:%.*]] to i32*
; CHECK-NEXT: br i1 [[COND:%.*]], label [[BB1:%.*]], label [[EXIT:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i16 addrspace(2)* [[X:%.*]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(2) [[X:%.*]] to i32
; CHECK-NEXT: [[X_I:%.*]] = zext i32 [[TMP1]] to i64
-; CHECK-NEXT: [[X_P:%.*]] = inttoptr i64 [[X_I]] to i32*
+; CHECK-NEXT: [[X_P:%.*]] = inttoptr i64 [[X_I]] to ptr
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: [[PHI:%.*]] = phi i32* [ [[X_P]], [[BB1]] ], [ [[Y_P]], [[TMP0:%.*]] ]
-; CHECK-NEXT: [[X_P_I:%.*]] = ptrtoint i32* [[PHI]] to i64
+; CHECK-NEXT: [[PHI:%.*]] = phi ptr [ [[X_P]], [[BB1]] ], [ [[Y:%.*]], [[TMP0:%.*]] ]
+; CHECK-NEXT: [[X_P_I:%.*]] = ptrtoint ptr [[PHI]] to i64
; CHECK-NEXT: ret i64 [[X_P_I]]
;
- %Y.p = bitcast i32** %Y to i32*
br i1 %cond, label %bb1, label %exit
bb1:
- %X.i = ptrtoint i16 addrspace(2)* %X to i64
- %X.p = inttoptr i64 %X.i to i32*
+ %X.i = ptrtoint ptr addrspace(2) %X to i64
+ %X.p = inttoptr i64 %X.i to ptr
br label %exit
exit:
- %phi = phi i32* [%X.p, %bb1], [%Y.p, %0]
- %X.p.i = ptrtoint i32* %phi to i64
+ %phi = phi ptr [%X.p, %bb1], [%Y, %0]
+ %X.p.i = ptrtoint ptr %phi to i64
ret i64 %X.p.i
}
; Check that dereferenceable metadata is combined
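; When the two loads are merged into one below the phi, their metadata must
; be intersected conservatively. For !dereferenceable that means keeping the
; smaller of the two operands; assuming !0 and !1 are byte counts whose
; minimum is 8, the combined node checked below is !{i64 8}.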
; CHECK-LABEL: cont:
-; CHECK: load i32*, i32**
+; CHECK: load ptr, ptr
; CHECK-SAME: !dereferenceable ![[DEREF:[0-9]+]]
-define i32* @test_phi_combine_load_metadata(i1 %c, i32** dereferenceable(8) %p1, i32** dereferenceable(8) %p2) {
+define ptr @test_phi_combine_load_metadata(i1 %c, ptr dereferenceable(8) %p1, ptr dereferenceable(8) %p2) {
br i1 %c, label %t, label %f
t:
call void @bar()
- %v1 = load i32*, i32** %p1, align 8, !dereferenceable !0
+ %v1 = load ptr, ptr %p1, align 8, !dereferenceable !0
br label %cont
f:
call void @baz()
- %v2 = load i32*, i32** %p2, align 8, !dereferenceable !1
+ %v2 = load ptr, ptr %p2, align 8, !dereferenceable !1
br label %cont
cont:
- %res = phi i32* [ %v1, %t ], [ %v2, %f ]
- ret i32* %res
+ %res = phi ptr [ %v1, %t ], [ %v2, %f ]
+ ret ptr %res
}
; CHECK: ![[DEREF]] = !{i64 8}
; Check that dereferenceable_or_null metadata is combined
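; !dereferenceable_or_null combines the same way: the merged load keeps the
; minimum byte count of the two incoming operands.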
; CHECK-LABEL: cont:
-; CHECK: load i32*, i32**
+; CHECK: load ptr, ptr
; CHECK-SAME: !dereferenceable_or_null ![[DEREF:[0-9]+]]
-define i32* @test_phi_combine_load_metadata(i1 %c, i32** dereferenceable(8) %p1, i32** dereferenceable(8) %p2) {
+define ptr @test_phi_combine_load_metadata(i1 %c, ptr dereferenceable(8) %p1, ptr dereferenceable(8) %p2) {
br i1 %c, label %t, label %f
t:
call void @bar()
- %v1 = load i32*, i32** %p1, align 8, !dereferenceable_or_null !0
+ %v1 = load ptr, ptr %p1, align 8, !dereferenceable_or_null !0
br label %cont
f:
call void @baz()
- %v2 = load i32*, i32** %p2, align 8, !dereferenceable_or_null !1
+ %v2 = load ptr, ptr %p2, align 8, !dereferenceable_or_null !1
br label %cont
cont:
- %res = phi i32* [ %v1, %t ], [ %v2, %f ]
- ret i32* %res
+ %res = phi ptr [ %v1, %t ], [ %v2, %f ]
+ ret ptr %res
}
; CHECK: ![[DEREF]] = !{i64 8}
; Check that nonnull metadata from non-dominating loads is not propagated.
; CHECK-LABEL: cont:
; CHECK-NOT: !nonnull
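; Only the load in %t carries !nonnull; the load in %f does not, so the
; merged load must drop the metadata, as the CHECK-NOT above verifies.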
-define i32* @test_combine_metadata_dominance(i1 %c, i32** dereferenceable(8) %p1, i32** dereferenceable(8) %p2) {
+define ptr @test_combine_metadata_dominance(i1 %c, ptr dereferenceable(8) %p1, ptr dereferenceable(8) %p2) {
br i1 %c, label %t, label %f
t:
call void @bar()
- %v1 = load i32*, i32** %p1, align 8, !nonnull !0
+ %v1 = load ptr, ptr %p1, align 8, !nonnull !0
br label %cont
f:
call void @baz()
- %v2 = load i32*, i32** %p2, align 8
+ %v2 = load ptr, ptr %p2, align 8
br label %cont
cont:
- %res = phi i32* [ %v1, %t ], [ %v2, %f ]
- ret i32* %res
+ %res = phi ptr [ %v1, %t ], [ %v2, %f ]
+ ret ptr %res
}
!0 = !{}
; Check that align metadata is combined
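; !align likewise combines to the minimum of the incoming operands; assuming
; !0 and !1 encode alignments whose minimum is 8, the merged load carries
; !{i64 8}.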
; CHECK-LABEL: cont:
-; CHECK: load i32*, i32**
+; CHECK: load ptr, ptr
; CHECK-SAME: !align ![[ALIGN:[0-9]+]]
-define i32* @test_phi_combine_load_metadata(i1 %c, i32** dereferenceable(8) %p1, i32** dereferenceable(8) %p2) {
+define ptr @test_phi_combine_load_metadata(i1 %c, ptr dereferenceable(8) %p1, ptr dereferenceable(8) %p2) {
br i1 %c, label %t, label %f
t:
call void @bar()
- %v1 = load i32*, i32** %p1, align 8, !align !0
+ %v1 = load ptr, ptr %p1, align 8, !align !0
br label %cont
f:
call void @baz()
- %v2 = load i32*, i32** %p2, align 8, !align !1
+ %v2 = load ptr, ptr %p2, align 8, !align !1
br label %cont
cont:
- %res = phi i32* [ %v1, %t ], [ %v2, %f ]
- ret i32* %res
+ %res = phi ptr [ %v1, %t ], [ %v2, %f ]
+ ret ptr %res
}
; CHECK: ![[ALIGN]] = !{i64 8}
; Don't push the geps through these phis, because they would require
; two phis each, which would burden the loop with high register pressure.
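; Sketch of the rejected rewrite: a phi such as
;   %A0r.0 = phi ptr [ %0, %entry ], [ %46, %bb ]
; where %46 = getelementptr inbounds float, ptr %A0r.0, i64 %As, would have
; to become a phi of the base pointer plus a phi of the running offset, so
; each of the twelve pointer phis below would turn into two phis live across
; the backedge.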
-define void @foo(float* %Ar, float* %Ai, i64 %As, float* %Cr, float* %Ci, i64 %Cs, i64 %n) nounwind {
+define void @foo(ptr %Ar, ptr %Ai, i64 %As, ptr %Cr, ptr %Ci, i64 %Cs, i64 %n) nounwind {
entry:
- %0 = getelementptr inbounds float, float* %Ar, i64 0 ; <float*> [#uses=1]
- %1 = getelementptr inbounds float, float* %Ai, i64 0 ; <float*> [#uses=1]
+ %0 = getelementptr inbounds float, ptr %Ar, i64 0 ; <ptr> [#uses=1]
+ %1 = getelementptr inbounds float, ptr %Ai, i64 0 ; <ptr> [#uses=1]
%2 = mul i64 %n, %As ; <i64> [#uses=1]
- %3 = getelementptr inbounds float, float* %Ar, i64 %2 ; <float*> [#uses=1]
+ %3 = getelementptr inbounds float, ptr %Ar, i64 %2 ; <ptr> [#uses=1]
%4 = mul i64 %n, %As ; <i64> [#uses=1]
- %5 = getelementptr inbounds float, float* %Ai, i64 %4 ; <float*> [#uses=1]
+ %5 = getelementptr inbounds float, ptr %Ai, i64 %4 ; <ptr> [#uses=1]
%6 = mul i64 %n, 2 ; <i64> [#uses=1]
%7 = mul i64 %6, %As ; <i64> [#uses=1]
- %8 = getelementptr inbounds float, float* %Ar, i64 %7 ; <float*> [#uses=1]
+ %8 = getelementptr inbounds float, ptr %Ar, i64 %7 ; <ptr> [#uses=1]
%9 = mul i64 %n, 2 ; <i64> [#uses=1]
%10 = mul i64 %9, %As ; <i64> [#uses=1]
- %11 = getelementptr inbounds float, float* %Ai, i64 %10 ; <float*> [#uses=1]
- %12 = getelementptr inbounds float, float* %Cr, i64 0 ; <float*> [#uses=1]
- %13 = getelementptr inbounds float, float* %Ci, i64 0 ; <float*> [#uses=1]
+ %11 = getelementptr inbounds float, ptr %Ai, i64 %10 ; <ptr> [#uses=1]
+ %12 = getelementptr inbounds float, ptr %Cr, i64 0 ; <ptr> [#uses=1]
+ %13 = getelementptr inbounds float, ptr %Ci, i64 0 ; <ptr> [#uses=1]
%14 = mul i64 %n, %Cs ; <i64> [#uses=1]
- %15 = getelementptr inbounds float, float* %Cr, i64 %14 ; <float*> [#uses=1]
+ %15 = getelementptr inbounds float, ptr %Cr, i64 %14 ; <ptr> [#uses=1]
%16 = mul i64 %n, %Cs ; <i64> [#uses=1]
- %17 = getelementptr inbounds float, float* %Ci, i64 %16 ; <float*> [#uses=1]
+ %17 = getelementptr inbounds float, ptr %Ci, i64 %16 ; <ptr> [#uses=1]
%18 = mul i64 %n, 2 ; <i64> [#uses=1]
%19 = mul i64 %18, %Cs ; <i64> [#uses=1]
- %20 = getelementptr inbounds float, float* %Cr, i64 %19 ; <float*> [#uses=1]
+ %20 = getelementptr inbounds float, ptr %Cr, i64 %19 ; <ptr> [#uses=1]
%21 = mul i64 %n, 2 ; <i64> [#uses=1]
%22 = mul i64 %21, %Cs ; <i64> [#uses=1]
- %23 = getelementptr inbounds float, float* %Ci, i64 %22 ; <float*> [#uses=1]
+ %23 = getelementptr inbounds float, ptr %Ci, i64 %22 ; <ptr> [#uses=1]
br label %bb13
bb: ; preds = %bb13
- %24 = load float, float* %A0r.0, align 4 ; <float> [#uses=1]
- %25 = load float, float* %A0i.0, align 4 ; <float> [#uses=1]
- %26 = load float, float* %A1r.0, align 4 ; <float> [#uses=2]
- %27 = load float, float* %A1i.0, align 4 ; <float> [#uses=2]
- %28 = load float, float* %A2r.0, align 4 ; <float> [#uses=2]
- %29 = load float, float* %A2i.0, align 4 ; <float> [#uses=2]
+ %24 = load float, ptr %A0r.0, align 4 ; <float> [#uses=1]
+ %25 = load float, ptr %A0i.0, align 4 ; <float> [#uses=1]
+ %26 = load float, ptr %A1r.0, align 4 ; <float> [#uses=2]
+ %27 = load float, ptr %A1i.0, align 4 ; <float> [#uses=2]
+ %28 = load float, ptr %A2r.0, align 4 ; <float> [#uses=2]
+ %29 = load float, ptr %A2i.0, align 4 ; <float> [#uses=2]
%30 = fadd float %26, %28 ; <float> [#uses=2]
%31 = fadd float %27, %29 ; <float> [#uses=2]
%32 = fsub float %26, %28 ; <float> [#uses=1]
%43 = fsub float %39, %40 ; <float> [#uses=1]
%44 = fsub float %38, %41 ; <float> [#uses=1]
%45 = fadd float %39, %40 ; <float> [#uses=1]
- store float %34, float* %C0r.0, align 4
- store float %35, float* %C0i.0, align 4
- store float %42, float* %C1r.0, align 4
- store float %43, float* %C1i.0, align 4
- store float %44, float* %C2r.0, align 4
- store float %45, float* %C2i.0, align 4
- %46 = getelementptr inbounds float, float* %A0r.0, i64 %As ; <float*> [#uses=1]
- %47 = getelementptr inbounds float, float* %A0i.0, i64 %As ; <float*> [#uses=1]
- %48 = getelementptr inbounds float, float* %A1r.0, i64 %As ; <float*> [#uses=1]
- %49 = getelementptr inbounds float, float* %A1i.0, i64 %As ; <float*> [#uses=1]
- %50 = getelementptr inbounds float, float* %A2r.0, i64 %As ; <float*> [#uses=1]
- %51 = getelementptr inbounds float, float* %A2i.0, i64 %As ; <float*> [#uses=1]
- %52 = getelementptr inbounds float, float* %C0r.0, i64 %Cs ; <float*> [#uses=1]
- %53 = getelementptr inbounds float, float* %C0i.0, i64 %Cs ; <float*> [#uses=1]
- %54 = getelementptr inbounds float, float* %C1r.0, i64 %Cs ; <float*> [#uses=1]
- %55 = getelementptr inbounds float, float* %C1i.0, i64 %Cs ; <float*> [#uses=1]
- %56 = getelementptr inbounds float, float* %C2r.0, i64 %Cs ; <float*> [#uses=1]
- %57 = getelementptr inbounds float, float* %C2i.0, i64 %Cs ; <float*> [#uses=1]
+ store float %34, ptr %C0r.0, align 4
+ store float %35, ptr %C0i.0, align 4
+ store float %42, ptr %C1r.0, align 4
+ store float %43, ptr %C1i.0, align 4
+ store float %44, ptr %C2r.0, align 4
+ store float %45, ptr %C2i.0, align 4
+ %46 = getelementptr inbounds float, ptr %A0r.0, i64 %As ; <ptr> [#uses=1]
+ %47 = getelementptr inbounds float, ptr %A0i.0, i64 %As ; <ptr> [#uses=1]
+ %48 = getelementptr inbounds float, ptr %A1r.0, i64 %As ; <ptr> [#uses=1]
+ %49 = getelementptr inbounds float, ptr %A1i.0, i64 %As ; <ptr> [#uses=1]
+ %50 = getelementptr inbounds float, ptr %A2r.0, i64 %As ; <ptr> [#uses=1]
+ %51 = getelementptr inbounds float, ptr %A2i.0, i64 %As ; <ptr> [#uses=1]
+ %52 = getelementptr inbounds float, ptr %C0r.0, i64 %Cs ; <ptr> [#uses=1]
+ %53 = getelementptr inbounds float, ptr %C0i.0, i64 %Cs ; <ptr> [#uses=1]
+ %54 = getelementptr inbounds float, ptr %C1r.0, i64 %Cs ; <ptr> [#uses=1]
+ %55 = getelementptr inbounds float, ptr %C1i.0, i64 %Cs ; <ptr> [#uses=1]
+ %56 = getelementptr inbounds float, ptr %C2r.0, i64 %Cs ; <ptr> [#uses=1]
+ %57 = getelementptr inbounds float, ptr %C2i.0, i64 %Cs ; <ptr> [#uses=1]
%58 = add nsw i64 %i.0, 1 ; <i64> [#uses=1]
br label %bb13
bb13: ; preds = %bb, %entry
%i.0 = phi i64 [ 0, %entry ], [ %58, %bb ] ; <i64> [#uses=2]
- %C2i.0 = phi float* [ %23, %entry ], [ %57, %bb ] ; <float*> [#uses=2]
- %C2r.0 = phi float* [ %20, %entry ], [ %56, %bb ] ; <float*> [#uses=2]
- %C1i.0 = phi float* [ %17, %entry ], [ %55, %bb ] ; <float*> [#uses=2]
- %C1r.0 = phi float* [ %15, %entry ], [ %54, %bb ] ; <float*> [#uses=2]
- %C0i.0 = phi float* [ %13, %entry ], [ %53, %bb ] ; <float*> [#uses=2]
- %C0r.0 = phi float* [ %12, %entry ], [ %52, %bb ] ; <float*> [#uses=2]
- %A2i.0 = phi float* [ %11, %entry ], [ %51, %bb ] ; <float*> [#uses=2]
- %A2r.0 = phi float* [ %8, %entry ], [ %50, %bb ] ; <float*> [#uses=2]
- %A1i.0 = phi float* [ %5, %entry ], [ %49, %bb ] ; <float*> [#uses=2]
- %A1r.0 = phi float* [ %3, %entry ], [ %48, %bb ] ; <float*> [#uses=2]
- %A0i.0 = phi float* [ %1, %entry ], [ %47, %bb ] ; <float*> [#uses=2]
- %A0r.0 = phi float* [ %0, %entry ], [ %46, %bb ] ; <float*> [#uses=2]
+ %C2i.0 = phi ptr [ %23, %entry ], [ %57, %bb ] ; <ptr> [#uses=2]
+ %C2r.0 = phi ptr [ %20, %entry ], [ %56, %bb ] ; <ptr> [#uses=2]
+ %C1i.0 = phi ptr [ %17, %entry ], [ %55, %bb ] ; <ptr> [#uses=2]
+ %C1r.0 = phi ptr [ %15, %entry ], [ %54, %bb ] ; <ptr> [#uses=2]
+ %C0i.0 = phi ptr [ %13, %entry ], [ %53, %bb ] ; <ptr> [#uses=2]
+ %C0r.0 = phi ptr [ %12, %entry ], [ %52, %bb ] ; <ptr> [#uses=2]
+ %A2i.0 = phi ptr [ %11, %entry ], [ %51, %bb ] ; <ptr> [#uses=2]
+ %A2r.0 = phi ptr [ %8, %entry ], [ %50, %bb ] ; <ptr> [#uses=2]
+ %A1i.0 = phi ptr [ %5, %entry ], [ %49, %bb ] ; <ptr> [#uses=2]
+ %A1r.0 = phi ptr [ %3, %entry ], [ %48, %bb ] ; <ptr> [#uses=2]
+ %A0i.0 = phi ptr [ %1, %entry ], [ %47, %bb ] ; <ptr> [#uses=2]
+ %A0r.0 = phi ptr [ %0, %entry ], [ %46, %bb ] ; <ptr> [#uses=2]
%59 = icmp slt i64 %i.0, %n ; <i1> [#uses=1]
br i1 %59, label %bb, label %bb14
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
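; Under opaque pointers the bitcasts in these tests are no-ops, so a phi
; whose incoming values are casts of the same pointer, e.g.
;   %p = phi ptr [ %ptr, %b0 ], [ %ptr, %b1 ]
; folds away to the pointer itself, as the updated checks show.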
-define void @test_bitcast_1(i1 %c, i32* %ptr) {
+define void @test_bitcast_1(i1 %c, ptr %ptr) {
; CHECK-LABEL: @test_bitcast_1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
; CHECK: b0:
-; CHECK-NEXT: [[CAST_0:%.*]] = bitcast i32* [[PTR:%.*]] to i8*
-; CHECK-NEXT: call void @use(i8* [[CAST_0]])
+; CHECK-NEXT: call void @use(ptr [[PTR:%.*]])
; CHECK-NEXT: br label [[END:%.*]]
; CHECK: b1:
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[P:%.*]] = bitcast i32* [[PTR]] to i8*
-; CHECK-NEXT: store i8 0, i8* [[P]], align 1
+; CHECK-NEXT: store i8 0, ptr [[PTR]], align 1
; CHECK-NEXT: ret void
;
entry:
- %cast.0 = bitcast i32* %ptr to i8*
- %cast.1 = bitcast i32* %ptr to i8*
br i1 %c, label %b0, label %b1
b0:
- call void @use(i8* %cast.0)
+ call void @use(ptr %ptr)
br label %end
b1:
br label %end
end:
- %p = phi i8* [ %cast.0, %b0 ], [ %cast.1, %b1 ]
- store i8 0, i8* %p
+ %p = phi ptr [ %ptr, %b0 ], [ %ptr, %b1 ]
+ store i8 0, ptr %p
ret void
}
-define void @test_bitcast_2(i1 %c, i32* %ptr) {
+define void @test_bitcast_2(i1 %c, ptr %ptr) {
; CHECK-LABEL: @test_bitcast_2(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
; CHECK: b0:
; CHECK-NEXT: br label [[END:%.*]]
; CHECK: b1:
-; CHECK-NEXT: [[CAST_1:%.*]] = bitcast i32* [[PTR:%.*]] to i8*
-; CHECK-NEXT: call void @use(i8* [[CAST_1]])
+; CHECK-NEXT: call void @use(ptr [[PTR:%.*]])
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[P:%.*]] = bitcast i32* [[PTR]] to i8*
-; CHECK-NEXT: store i8 0, i8* [[P]], align 1
+; CHECK-NEXT: store i8 0, ptr [[PTR]], align 1
; CHECK-NEXT: ret void
;
entry:
br i1 %c, label %b0, label %b1
b0:
- %cast.0 = bitcast i32* %ptr to i8*
br label %end
b1:
- %cast.1 = bitcast i32* %ptr to i8*
- call void @use(i8* %cast.1)
+ call void @use(ptr %ptr)
br label %end
end:
- %p = phi i8* [ %cast.0, %b0 ], [ %cast.1, %b1 ]
- store i8 0, i8* %p
+ %p = phi ptr [ %ptr, %b0 ], [ %ptr, %b1 ]
+ store i8 0, ptr %p
ret void
}
-define void @test_bitcast_3(i1 %c, i32** %ptr) {
+define void @test_bitcast_3(i1 %c, ptr %ptr) {
; CHECK-LABEL: @test_bitcast_3(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[LOAD_PTR:%.*]] = load i32*, i32** [[PTR:%.*]], align 8
+; CHECK-NEXT: [[LOAD_PTR:%.*]] = load ptr, ptr [[PTR:%.*]], align 8
; CHECK-NEXT: br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
; CHECK: b0:
; CHECK-NEXT: br label [[END:%.*]]
; CHECK: b1:
-; CHECK-NEXT: [[CAST_1:%.*]] = bitcast i32* [[LOAD_PTR]] to i8*
-; CHECK-NEXT: call void @use(i8* [[CAST_1]])
+; CHECK-NEXT: call void @use(ptr [[LOAD_PTR]])
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[P:%.*]] = bitcast i32* [[LOAD_PTR]] to i8*
-; CHECK-NEXT: store i8 0, i8* [[P]], align 1
+; CHECK-NEXT: store i8 0, ptr [[LOAD_PTR]], align 1
; CHECK-NEXT: ret void
;
entry:
- %load.ptr = load i32*, i32** %ptr
+ %load.ptr = load ptr, ptr %ptr
br i1 %c, label %b0, label %b1
b0:
- %cast.0 = bitcast i32* %load.ptr to i8*
br label %end
b1:
- %cast.1 = bitcast i32* %load.ptr to i8*
- call void @use(i8* %cast.1)
+ call void @use(ptr %load.ptr)
br label %end
end:
- %p = phi i8* [ %cast.0, %b0 ], [ %cast.1, %b1 ]
- store i8 0, i8* %p
+ %p = phi ptr [ %load.ptr, %b0 ], [ %load.ptr, %b1 ]
+ store i8 0, ptr %p
ret void
}
-define void @test_bitcast_loads_in_different_bbs(i1 %c, i32** %ptr.0, i32** %ptr.1) {
+define void @test_bitcast_loads_in_different_bbs(i1 %c, ptr %ptr.0, ptr %ptr.1) {
; CHECK-LABEL: @test_bitcast_loads_in_different_bbs(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
; CHECK: b0:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32** [[PTR_0:%.*]] to i8**
-; CHECK-NEXT: [[LOAD_PTR_02:%.*]] = load i8*, i8** [[TMP0]], align 8
-; CHECK-NEXT: call void @use(i8* [[LOAD_PTR_02]])
+; CHECK-NEXT: [[LOAD_PTR_02:%.*]] = load ptr, ptr [[PTR_0:%.*]], align 8
+; CHECK-NEXT: call void @use(ptr [[LOAD_PTR_02]])
; CHECK-NEXT: br label [[END:%.*]]
; CHECK: b1:
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32** [[PTR_1:%.*]] to i8**
-; CHECK-NEXT: [[LOAD_PTR_11:%.*]] = load i8*, i8** [[TMP1]], align 8
+; CHECK-NEXT: [[LOAD_PTR_11:%.*]] = load ptr, ptr [[PTR_1:%.*]], align 8
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[P:%.*]] = phi i8* [ [[LOAD_PTR_02]], [[B0]] ], [ [[LOAD_PTR_11]], [[B1]] ]
-; CHECK-NEXT: store i8 0, i8* [[P]], align 1
+; CHECK-NEXT: [[P:%.*]] = phi ptr [ [[LOAD_PTR_02]], [[B0]] ], [ [[LOAD_PTR_11]], [[B1]] ]
+; CHECK-NEXT: store i8 0, ptr [[P]], align 1
; CHECK-NEXT: ret void
;
entry:
br i1 %c, label %b0, label %b1
b0:
- %load.ptr.0 = load i32*, i32** %ptr.0
- %cast.0 = bitcast i32* %load.ptr.0 to i8*
- call void @use(i8* %cast.0)
+ %load.ptr.0 = load ptr, ptr %ptr.0
+ call void @use(ptr %load.ptr.0)
br label %end
b1:
- %load.ptr.1 = load i32*, i32** %ptr.1
- %cast.1 = bitcast i32* %load.ptr.1 to i8*
+ %load.ptr.1 = load ptr, ptr %ptr.1
br label %end
end:
- %p = phi i8* [ %cast.0, %b0 ], [ %cast.1, %b1 ]
- store i8 0, i8* %p
+ %p = phi ptr [ %load.ptr.0, %b0 ], [ %load.ptr.1, %b1 ]
+ store i8 0, ptr %p
ret void
}
-define void @test_gep_1(i1 %c, i32* %ptr) {
+define void @test_gep_1(i1 %c, ptr %ptr) {
; CHECK-LABEL: @test_gep_1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
; CHECK: b0:
-; CHECK-NEXT: call void @use.i32(i32* [[PTR:%.*]])
+; CHECK-NEXT: call void @use.i32(ptr [[PTR:%.*]])
; CHECK-NEXT: br label [[END:%.*]]
; CHECK: b1:
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: store i32 0, i32* [[PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[PTR]], align 4
; CHECK-NEXT: ret void
;
entry:
br i1 %c, label %b0, label %b1
b0:
- %cast.0 = getelementptr i32, i32* %ptr, i32 0
- call void @use.i32(i32* %cast.0)
+ call void @use.i32(ptr %ptr)
br label %end
b1:
- %cast.1 = getelementptr i32, i32* %ptr, i32 0
br label %end
end:
- %p = phi i32* [ %cast.0, %b0 ], [ %cast.1, %b1 ]
- store i32 0, i32* %p
+ %p = phi ptr [ %ptr, %b0 ], [ %ptr, %b1 ]
+ store i32 0, ptr %p
ret void
}
-define void @test_bitcast_not_foldable(i1 %c, i32* %ptr.0, i32* %ptr.1) {
+define void @test_bitcast_not_foldable(i1 %c, ptr %ptr.0, ptr %ptr.1) {
; CHECK-LABEL: @test_bitcast_not_foldable(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
; CHECK: b0:
-; CHECK-NEXT: [[CAST_0:%.*]] = bitcast i32* [[PTR_0:%.*]] to i8*
; CHECK-NEXT: br label [[END:%.*]]
; CHECK: b1:
-; CHECK-NEXT: [[CAST_1:%.*]] = bitcast i32* [[PTR_1:%.*]] to i8*
-; CHECK-NEXT: call void @use(i8* [[CAST_1]])
+; CHECK-NEXT: call void @use(ptr [[PTR_1:%.*]])
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[P:%.*]] = phi i8* [ [[CAST_0]], [[B0]] ], [ [[CAST_1]], [[B1]] ]
-; CHECK-NEXT: store i8 0, i8* [[P]], align 1
+; CHECK-NEXT: [[P:%.*]] = phi ptr [ [[PTR_0:%.*]], [[B0]] ], [ [[PTR_1]], [[B1]] ]
+; CHECK-NEXT: store i8 0, ptr [[P]], align 1
; CHECK-NEXT: ret void
;
entry:
br i1 %c, label %b0, label %b1
b0:
- %cast.0 = bitcast i32* %ptr.0 to i8*
br label %end
b1:
- %cast.1 = bitcast i32* %ptr.1 to i8*
- call void @use(i8* %cast.1)
+ call void @use(ptr %ptr.1)
br label %end
end:
- %p = phi i8* [ %cast.0, %b0 ], [ %cast.1, %b1 ]
- store i8 0, i8* %p
+ %p = phi ptr [ %ptr.0, %b0 ], [ %ptr.1, %b1 ]
+ store i8 0, ptr %p
ret void
}
-define void @test_bitcast_with_extra_use(i1 %c, i32* %ptr) {
+define void @test_bitcast_with_extra_use(i1 %c, ptr %ptr) {
; CHECK-LABEL: @test_bitcast_with_extra_use(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
; CHECK: b0:
-; CHECK-NEXT: [[CAST_0:%.*]] = bitcast i32* [[PTR:%.*]] to i8*
-; CHECK-NEXT: call void @use(i8* [[CAST_0]])
+; CHECK-NEXT: call void @use(ptr [[PTR:%.*]])
; CHECK-NEXT: br label [[END:%.*]]
; CHECK: b1:
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[P:%.*]] = bitcast i32* [[PTR]] to i8*
-; CHECK-NEXT: store i8 0, i8* [[P]], align 1
+; CHECK-NEXT: store i8 0, ptr [[PTR]], align 1
; CHECK-NEXT: ret void
;
entry:
br i1 %c, label %b0, label %b1
b0:
- %cast.0 = bitcast i32* %ptr to i8*
- call void @use(i8* %cast.0)
+ call void @use(ptr %ptr)
br label %end
b1:
- %cast.1 = bitcast i32* %ptr to i8*
br label %end
end:
- %p = phi i8* [ %cast.0, %b0 ], [ %cast.1, %b1 ]
- store i8 0, i8* %p
+ %p = phi ptr [ %ptr, %b0 ], [ %ptr, %b1 ]
+ store i8 0, ptr %p
ret void
}
-define void @test_bitcast_different_bases(i1 %c, i32* %ptr.0, i32* %ptr.1) {
+define void @test_bitcast_different_bases(i1 %c, ptr %ptr.0, ptr %ptr.1) {
; CHECK-LABEL: @test_bitcast_different_bases(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
; CHECK: b0:
-; CHECK-NEXT: [[CAST_0:%.*]] = bitcast i32* [[PTR_0:%.*]] to i8*
-; CHECK-NEXT: call void @use(i8* [[CAST_0]])
+; CHECK-NEXT: call void @use(ptr [[PTR_0:%.*]])
; CHECK-NEXT: br label [[END:%.*]]
; CHECK: b1:
-; CHECK-NEXT: [[CAST_1:%.*]] = bitcast i32* [[PTR_1:%.*]] to i8*
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[P:%.*]] = phi i8* [ [[CAST_0]], [[B0]] ], [ [[CAST_1]], [[B1]] ]
-; CHECK-NEXT: store i8 0, i8* [[P]], align 1
+; CHECK-NEXT: [[P:%.*]] = phi ptr [ [[PTR_0]], [[B0]] ], [ [[PTR_1:%.*]], [[B1]] ]
+; CHECK-NEXT: store i8 0, ptr [[P]], align 1
; CHECK-NEXT: ret void
;
entry:
br i1 %c, label %b0, label %b1
b0:
- %cast.0 = bitcast i32* %ptr.0 to i8*
- call void @use(i8* %cast.0)
+ call void @use(ptr %ptr.0)
br label %end
b1:
- %cast.1 = bitcast i32* %ptr.1 to i8*
br label %end
end:
- %p = phi i8* [ %cast.0, %b0 ], [ %cast.1, %b1 ]
- store i8 0, i8* %p
+ %p = phi ptr [ %ptr.0, %b0 ], [ %ptr.1, %b1 ]
+ store i8 0, ptr %p
ret void
}
-define void @test_bitcast_gep_chains(i1 %c, i32* %ptr) {
+define void @test_bitcast_gep_chains(i1 %c, ptr %ptr) {
; CHECK-LABEL: @test_bitcast_gep_chains(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
; CHECK: b0:
-; CHECK-NEXT: [[CAST_0:%.*]] = bitcast i32* [[PTR:%.*]] to i8*
-; CHECK-NEXT: call void @use(i8* [[CAST_0]])
+; CHECK-NEXT: call void @use(ptr [[PTR:%.*]])
; CHECK-NEXT: br label [[END:%.*]]
; CHECK: b1:
-; CHECK-NEXT: call void @use.i32(i32* [[PTR]])
+; CHECK-NEXT: call void @use.i32(ptr [[PTR]])
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[P:%.*]] = bitcast i32* [[PTR]] to i8*
-; CHECK-NEXT: store i8 0, i8* [[P]], align 1
+; CHECK-NEXT: store i8 0, ptr [[PTR]], align 1
; CHECK-NEXT: ret void
;
entry:
- %gep = getelementptr i32, i32* %ptr, i32 0
br i1 %c, label %b0, label %b1
b0:
- %cast.0 = bitcast i32* %gep to i8*
- call void @use(i8* %cast.0)
+ call void @use(ptr %ptr)
br label %end
b1:
- %cast.1 = bitcast i32* %ptr to i8*
- %cast.2 = bitcast i8* %cast.1 to i32*
- call void @use.i32(i32* %cast.2)
- %cast.3 = bitcast i32* %cast.2 to i8*
+ call void @use.i32(ptr %ptr)
br label %end
end:
- %p = phi i8* [ %cast.0, %b0 ], [ %cast.3, %b1 ]
- store i8 0, i8* %p
+ %p = phi ptr [ %ptr, %b0 ], [ %ptr, %b1 ]
+ store i8 0, ptr %p
ret void
}
-define void @test_4_incoming_values_different_bases_1(i32 %c, i32* %ptr.0, i32* %ptr.1) {
+define void @test_4_incoming_values_different_bases_1(i32 %c, ptr %ptr.0, ptr %ptr.1) {
; CHECK-LABEL: @test_4_incoming_values_different_bases_1(
; CHECK-NEXT: entry:
; CHECK-NEXT: switch i32 [[C:%.*]], label [[END_2:%.*]] [
; CHECK-NEXT: i32 3, label [[B3:%.*]]
; CHECK-NEXT: ]
; CHECK: b0:
-; CHECK-NEXT: [[CAST_0:%.*]] = bitcast i32* [[PTR_0:%.*]] to i8*
-; CHECK-NEXT: call void @use(i8* [[CAST_0]])
+; CHECK-NEXT: call void @use(ptr [[PTR_0:%.*]])
; CHECK-NEXT: br label [[END:%.*]]
; CHECK: b1:
-; CHECK-NEXT: [[CAST_3:%.*]] = bitcast i32* [[PTR_1:%.*]] to i8*
; CHECK-NEXT: br label [[END]]
; CHECK: b2:
-; CHECK-NEXT: [[CAST_4:%.*]] = bitcast i32* [[PTR_0]] to i8*
; CHECK-NEXT: br label [[END]]
; CHECK: b3:
-; CHECK-NEXT: [[CAST_5:%.*]] = bitcast i32* [[PTR_0]] to i8*
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[P:%.*]] = phi i8* [ [[CAST_0]], [[B0]] ], [ [[CAST_3]], [[B1]] ], [ [[CAST_4]], [[B2]] ], [ [[CAST_5]], [[B3]] ]
-; CHECK-NEXT: store i8 0, i8* [[P]], align 1
+; CHECK-NEXT: [[P:%.*]] = phi ptr [ [[PTR_0]], [[B0]] ], [ [[PTR_1:%.*]], [[B1]] ], [ [[PTR_0]], [[B2]] ], [ [[PTR_0]], [[B3]] ]
+; CHECK-NEXT: store i8 0, ptr [[P]], align 1
; CHECK-NEXT: ret void
; CHECK: end.2:
; CHECK-NEXT: ret void
;
entry:
- %gep = getelementptr i32, i32* %ptr.0, i32 0
switch i32 %c, label %end.2 [ i32 0, label %b0
i32 1, label %b1
i32 2, label %b2
i32 3, label %b3]
b0:
- %cast.0 = bitcast i32* %gep to i8*
- call void @use(i8* %cast.0)
+ call void @use(ptr %ptr.0)
br label %end
b1:
- %cast.1 = bitcast i32* %ptr.1 to i8*
- %cast.2 = bitcast i8* %cast.1 to i64*
- %cast.3 = bitcast i64* %cast.2 to i8*
br label %end
b2:
- %cast.4 = bitcast i32* %gep to i8*
br label %end
b3:
- %cast.5 = bitcast i32 * %ptr.0 to i8*
br label %end
end:
- %p = phi i8* [ %cast.0, %b0 ], [ %cast.3, %b1 ], [ %cast.4, %b2 ], [ %cast.5, %b3]
- store i8 0, i8* %p
+ %p = phi ptr [ %ptr.0, %b0 ], [ %ptr.1, %b1 ], [ %ptr.0, %b2 ], [ %ptr.0, %b3]
+ store i8 0, ptr %p
ret void
end.2:
ret void
}
-define void @test_4_incoming_values_different_bases_2(i32 %c, i32* %ptr.0, i32* %ptr.1) {
+define void @test_4_incoming_values_different_bases_2(i32 %c, ptr %ptr.0, ptr %ptr.1) {
; CHECK-LABEL: @test_4_incoming_values_different_bases_2(
; CHECK-NEXT: entry:
; CHECK-NEXT: switch i32 [[C:%.*]], label [[END_2:%.*]] [
; CHECK: b0:
; CHECK-NEXT: br label [[END:%.*]]
; CHECK: b1:
-; CHECK-NEXT: [[CAST_1:%.*]] = bitcast i32* [[PTR_0:%.*]] to i8*
-; CHECK-NEXT: call void @use(i8* [[CAST_1]])
+; CHECK-NEXT: call void @use(ptr [[PTR_0:%.*]])
; CHECK-NEXT: br label [[END]]
; CHECK: b2:
; CHECK-NEXT: br label [[END]]
; CHECK: b3:
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[P_IN:%.*]] = phi i32* [ [[PTR_1:%.*]], [[B0]] ], [ [[PTR_0]], [[B1]] ], [ [[PTR_0]], [[B2]] ], [ [[PTR_0]], [[B3]] ]
-; CHECK-NEXT: [[P:%.*]] = bitcast i32* [[P_IN]] to i8*
-; CHECK-NEXT: store i8 0, i8* [[P]], align 1
+; CHECK-NEXT: [[P_IN:%.*]] = phi ptr [ [[PTR_1:%.*]], [[B0]] ], [ [[PTR_0]], [[B1]] ], [ [[PTR_0]], [[B2]] ], [ [[PTR_0]], [[B3]] ]
+; CHECK-NEXT: store i8 0, ptr [[P_IN]], align 1
; CHECK-NEXT: ret void
; CHECK: end.2:
; CHECK-NEXT: ret void
;
entry:
- %gep = getelementptr i32, i32* %ptr.0, i32 0
switch i32 %c, label %end.2 [ i32 0, label %b0
i32 1, label %b1
i32 2, label %b2
i32 3, label %b3]
b0:
- %cast.0 = bitcast i32* %ptr.1 to i8*
br label %end
b1:
- %cast.1 = bitcast i32* %ptr.0 to i8*
- call void @use(i8* %cast.1)
- %cast.2 = bitcast i8* %cast.1 to i64*
- %cast.3 = bitcast i64* %cast.2 to i8*
+ call void @use(ptr %ptr.0)
br label %end
b2:
- %cast.4 = bitcast i32* %gep to i8*
br label %end
b3:
- %cast.5 = bitcast i32 * %ptr.0 to i8*
br label %end
end:
- %p = phi i8* [ %cast.0, %b0 ], [ %cast.3, %b1 ], [ %cast.4, %b2 ], [ %cast.5, %b3]
- store i8 0, i8* %p
+ %p = phi ptr [ %ptr.1, %b0 ], [ %ptr.0, %b1 ], [ %ptr.0, %b2 ], [ %ptr.0, %b3]
+ store i8 0, ptr %p
ret void
end.2:
ret void
}
-define void @test_4_incoming_values_different_bases_3(i32 %c, i32* %ptr.0, i32* %ptr.1) {
+define void @test_4_incoming_values_different_bases_3(i32 %c, ptr %ptr.0, ptr %ptr.1) {
; CHECK-LABEL: @test_4_incoming_values_different_bases_3(
; CHECK-NEXT: entry:
; CHECK-NEXT: switch i32 [[C:%.*]], label [[END_2:%.*]] [
; CHECK-NEXT: i32 3, label [[B3:%.*]]
; CHECK-NEXT: ]
; CHECK: b0:
-; CHECK-NEXT: [[CAST_0:%.*]] = bitcast i32* [[PTR_0:%.*]] to i8*
; CHECK-NEXT: br label [[END:%.*]]
; CHECK: b1:
-; CHECK-NEXT: [[CAST_3:%.*]] = bitcast i32* [[PTR_0]] to i8*
; CHECK-NEXT: br label [[END]]
; CHECK: b2:
-; CHECK-NEXT: [[CAST_4:%.*]] = bitcast i32* [[PTR_0]] to i8*
-; CHECK-NEXT: call void @use(i8* [[CAST_4]])
+; CHECK-NEXT: call void @use(ptr [[PTR_0:%.*]])
; CHECK-NEXT: br label [[END]]
; CHECK: b3:
-; CHECK-NEXT: [[CAST_5:%.*]] = bitcast i32* [[PTR_1:%.*]] to i8*
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[P:%.*]] = phi i8* [ [[CAST_0]], [[B0]] ], [ [[CAST_3]], [[B1]] ], [ [[CAST_4]], [[B2]] ], [ [[CAST_5]], [[B3]] ]
-; CHECK-NEXT: store i8 0, i8* [[P]], align 1
+; CHECK-NEXT: [[P:%.*]] = phi ptr [ [[PTR_0]], [[B0]] ], [ [[PTR_0]], [[B1]] ], [ [[PTR_0]], [[B2]] ], [ [[PTR_1:%.*]], [[B3]] ]
+; CHECK-NEXT: store i8 0, ptr [[P]], align 1
; CHECK-NEXT: ret void
; CHECK: end.2:
; CHECK-NEXT: ret void
;
entry:
- %gep = getelementptr i32, i32* %ptr.0, i32 0
switch i32 %c, label %end.2 [ i32 0, label %b0
i32 1, label %b1
i32 2, label %b2
i32 3, label %b3]
b0:
- %cast.0 = bitcast i32* %ptr.0 to i8*
br label %end
b1:
- %cast.1 = bitcast i32* %ptr.0 to i8*
- %cast.2 = bitcast i8* %cast.1 to i64*
- %cast.3 = bitcast i64* %cast.2 to i8*
br label %end
b2:
- %cast.4 = bitcast i32* %gep to i8*
- call void @use(i8* %cast.4)
+ call void @use(ptr %ptr.0)
br label %end
b3:
- %cast.5 = bitcast i32 * %ptr.1 to i8*
br label %end
end:
- %p = phi i8* [ %cast.0, %b0 ], [ %cast.3, %b1 ], [ %cast.4, %b2 ], [ %cast.5, %b3]
- store i8 0, i8* %p
+ %p = phi ptr [ %ptr.0, %b0 ], [ %ptr.0, %b1 ], [ %ptr.0, %b2 ], [ %ptr.1, %b3]
+ store i8 0, ptr %p
ret void
end.2:
ret void
}
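; Unlike the no-op bitcasts above, the addrspacecasts below are real
; operations: they are kept (sunk from the entry block into the blocks that
; use them) rather than folded away.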
-define void @test_addrspacecast_1(i1 %c, i32* %ptr) {
+define void @test_addrspacecast_1(i1 %c, ptr %ptr) {
; CHECK-LABEL: @test_addrspacecast_1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
; CHECK: b0:
; CHECK-NEXT: br label [[END:%.*]]
; CHECK: b1:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[PTR:%.*]] to i8*
-; CHECK-NEXT: [[CAST_1:%.*]] = addrspacecast i8* [[TMP0]] to i8 addrspace(1)*
-; CHECK-NEXT: call void @use.i8.addrspace1(i8 addrspace(1)* [[CAST_1]])
+; CHECK-NEXT: [[CAST_1:%.*]] = addrspacecast ptr [[PTR:%.*]] to ptr addrspace(1)
+; CHECK-NEXT: call void @use.i8.addrspace1(ptr addrspace(1) [[CAST_1]])
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[PTR]] to i8*
-; CHECK-NEXT: [[P:%.*]] = addrspacecast i8* [[TMP1]] to i8 addrspace(1)*
-; CHECK-NEXT: store i8 0, i8 addrspace(1)* [[P]], align 1
+; CHECK-NEXT: [[P:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(1)
+; CHECK-NEXT: store i8 0, ptr addrspace(1) [[P]], align 1
; CHECK-NEXT: ret void
;
entry:
- %cast.0 = addrspacecast i32* %ptr to i8 addrspace(1)*
- %cast.1 = addrspacecast i32* %ptr to i8 addrspace(1)*
+ %cast.0 = addrspacecast ptr %ptr to ptr addrspace(1)
+ %cast.1 = addrspacecast ptr %ptr to ptr addrspace(1)
br i1 %c, label %b0, label %b1
b0:
br label %end
b1:
- call void @use.i8.addrspace1(i8 addrspace(1)* %cast.1)
+ call void @use.i8.addrspace1(ptr addrspace(1) %cast.1)
br label %end
end:
- %p = phi i8 addrspace(1)* [ %cast.0, %b0 ], [ %cast.1, %b1 ]
- store i8 0, i8 addrspace(1)* %p
+ %p = phi ptr addrspace(1) [ %cast.0, %b0 ], [ %cast.1, %b1 ]
+ store i8 0, ptr addrspace(1) %p
ret void
}
-declare void @use(i8*)
-declare void @use.i32(i32*)
-declare void @use.i8.addrspace1(i8 addrspace(1)*)
+declare void @use(ptr)
+declare void @use.i32(ptr)
+declare void @use.i8.addrspace1(ptr addrspace(1))
; CHECK: delay:
; CHECK-NEXT: br label [[FINAL]]
; CHECK: final:
-; CHECK-NEXT: [[USE2:%.*]] = phi i32 [ 1, [[ENTRY:%.*]] ], [ select (i1 icmp eq (i32* @A, i32* @B), i32 2, i32 1), [[DELAY]] ]
+; CHECK-NEXT: [[USE2:%.*]] = phi i32 [ 1, [[ENTRY:%.*]] ], [ select (i1 icmp eq (ptr @A, ptr @B), i32 2, i32 1), [[DELAY]] ]
; CHECK-NEXT: ret i32 [[USE2]]
;
entry:
br label %final
final:
- %use2 = phi i1 [ false, %entry ], [ icmp eq (i32* @A, i32* @B), %delay ]
+ %use2 = phi i1 [ false, %entry ], [ icmp eq (ptr @A, ptr @B), %delay ]
%value = select i1 %use2, i32 2, i32 1
ret i32 %value
}
; Don't crash on unreachable IR.
-define void @PR48369(i32 %a, i32* %p) {
+define void @PR48369(i32 %a, ptr %p) {
; CHECK-LABEL: @PR48369(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[PHI_CMP:%.*]] = icmp sgt i32 [[A:%.*]], 0
; CHECK: bb1:
; CHECK-NEXT: [[CMP:%.*]] = phi i1 [ [[PHI_CMP]], [[DEADBB:%.*]] ], [ true, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[SHL:%.*]] = select i1 [[CMP]], i32 256, i32 0
-; CHECK-NEXT: store i32 [[SHL]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[SHL]], ptr [[P:%.*]], align 4
; CHECK-NEXT: br label [[END:%.*]]
; CHECK: deadbb:
; CHECK-NEXT: br label [[BB1]]
bb1:
%cmp = phi i1 [ %phi.cmp, %deadbb ], [ true, %entry ]
%shl = select i1 %cmp, i32 256, i32 0
- store i32 %shl, i32* %p
+ store i32 %shl, ptr %p
br label %end
deadbb:
; RUN: opt -S -passes=instcombine < %s | FileCheck %s
; OSS Fuzz: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=15217
-define i64 @fuzz15217(i1 %cond, i8* %Ptr, i64 %Val) {
+define i64 @fuzz15217(i1 %cond, ptr %Ptr, i64 %Val) {
; CHECK-LABEL: @fuzz15217(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[COND:%.*]], label [[END:%.*]], label [[TWO:%.*]]
; We are really checking that this doesn't loop forever. We would never
; actually get to the checks here if it did.
-define void @timeout(i16* nocapture readonly %cinfo) {
+define void @timeout(ptr nocapture readonly %cinfo) {
; CHECK-LABEL: @timeout(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds i16, i16* [[CINFO:%.*]], i32 2
-; CHECK-NEXT: [[L:%.*]] = load i16, i16* [[ARRAYIDX15]], align 2
+; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds i16, ptr [[CINFO:%.*]], i32 2
+; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[ARRAYIDX15]], align 2
; CHECK-NEXT: [[CMP17:%.*]] = icmp eq i16 [[L]], 0
; CHECK-NEXT: [[EXTRACT_T1:%.*]] = trunc i16 [[L]] to i8
; CHECK-NEXT: br i1 [[CMP17]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: [[DOTPRE:%.*]] = load i16, i16* [[ARRAYIDX15]], align 2
+; CHECK-NEXT: [[DOTPRE:%.*]] = load i16, ptr [[ARRAYIDX15]], align 2
; CHECK-NEXT: [[EXTRACT_T:%.*]] = trunc i16 [[DOTPRE]] to i8
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
; CHECK-NEXT: [[P_OFF0:%.*]] = phi i8 [ [[EXTRACT_T]], [[IF_THEN]] ], [ [[EXTRACT_T1]], [[FOR_BODY]] ]
; CHECK-NEXT: [[SUB:%.*]] = add i8 [[P_OFF0]], -1
-; CHECK-NEXT: store i8 [[SUB]], i8* undef, align 1
+; CHECK-NEXT: store i8 [[SUB]], ptr undef, align 1
; CHECK-NEXT: br label [[FOR_BODY]]
;
entry:
br label %for.body
for.body:
- %arrayidx15 = getelementptr inbounds i16, i16* %cinfo, i32 2
- %l = load i16, i16* %arrayidx15, align 2
+ %arrayidx15 = getelementptr inbounds i16, ptr %cinfo, i32 2
+ %l = load i16, ptr %arrayidx15, align 2
%cmp17 = icmp eq i16 %l, 0
br i1 %cmp17, label %if.then, label %if.end
if.then:
- %.pre = load i16, i16* %arrayidx15, align 2
+ %.pre = load i16, ptr %arrayidx15, align 2
br label %if.end
if.end:
%p = phi i16 [ %.pre, %if.then ], [ %l, %for.body ]
%conv19 = trunc i16 %p to i8
%sub = add i8 %conv19, -1
- store i8 %sub, i8* undef, align 1
+ store i8 %sub, ptr undef, align 1
br label %for.body
}
ret i32 0
}
-define i32* @test8({ i32, i32 } *%A, i1 %b) {
+define ptr @test8(ptr %A, i1 %b) {
; CHECK-LABEL: @test8(
; CHECK-NEXT: BB0:
; CHECK-NEXT: br i1 [[B:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: BB1:
; CHECK-NEXT: br label [[BB2]]
; CHECK: BB2:
-; CHECK-NEXT: [[B:%.*]] = getelementptr { i32, i32 }, { i32, i32 }* [[A:%.*]], i64 0, i32 1
-; CHECK-NEXT: ret i32* [[B]]
+; CHECK-NEXT: [[B:%.*]] = getelementptr { i32, i32 }, ptr [[A:%.*]], i64 0, i32 1
+; CHECK-NEXT: ret ptr [[B]]
;
BB0:
- %X = getelementptr inbounds { i32, i32 }, { i32, i32 } *%A, i32 0, i32 1
+ %X = getelementptr inbounds { i32, i32 }, ptr %A, i32 0, i32 1
br i1 %b, label %BB1, label %BB2
BB1:
- %Y = getelementptr { i32, i32 }, { i32, i32 } *%A, i32 0, i32 1
+ %Y = getelementptr { i32, i32 }, ptr %A, i32 0, i32 1
br label %BB2
BB2:
;; Suck GEPs into phi
- %B = phi i32* [ %X, %BB0 ], [ %Y, %BB1 ]
- ret i32* %B
+ %B = phi ptr [ %X, %BB0 ], [ %Y, %BB1 ]
+ ret ptr %B
}
-define i32 @test9(i32* %A, i32* %B) {
+define i32 @test9(ptr %A, ptr %B) {
; CHECK-LABEL: @test9(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[C:%.*]] = icmp eq i32* [[A:%.*]], null
+; CHECK-NEXT: [[C:%.*]] = icmp eq ptr [[A:%.*]], null
; CHECK-NEXT: br i1 [[C]], label [[BB1:%.*]], label [[BB:%.*]]
; CHECK: bb:
; CHECK-NEXT: br label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: br label [[BB2]]
; CHECK: bb2:
-; CHECK-NEXT: [[E_IN:%.*]] = phi i32* [ [[B:%.*]], [[BB]] ], [ [[A]], [[BB1]] ]
-; CHECK-NEXT: [[E:%.*]] = load i32, i32* [[E_IN]], align 1
+; CHECK-NEXT: [[E_IN:%.*]] = phi ptr [ [[B:%.*]], [[BB]] ], [ [[A]], [[BB1]] ]
+; CHECK-NEXT: [[E:%.*]] = load i32, ptr [[E_IN]], align 1
; CHECK-NEXT: ret i32 [[E]]
;
entry:
- %c = icmp eq i32* %A, null
+ %c = icmp eq ptr %A, null
br i1 %c, label %bb1, label %bb
bb:
- %C = load i32, i32* %B, align 1
+ %C = load i32, ptr %B, align 1
br label %bb2
bb1:
- %D = load i32, i32* %A, align 1
+ %D = load i32, ptr %A, align 1
br label %bb2
bb2:
%E = phi i32 [ %C, %bb ], [ %D, %bb1 ]
ret i32 %E
}
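; Same shape as test9 but with mismatched alignments: the merged load of the
; phi'd pointer keeps the smaller of the two alignments (align 16 from the
; align 16 / align 32 inputs).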
-define i32 @test10(i32* %A, i32* %B) {
+define i32 @test10(ptr %A, ptr %B) {
; CHECK-LABEL: @test10(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[C:%.*]] = icmp eq i32* [[A:%.*]], null
+; CHECK-NEXT: [[C:%.*]] = icmp eq ptr [[A:%.*]], null
; CHECK-NEXT: br i1 [[C]], label [[BB1:%.*]], label [[BB:%.*]]
; CHECK: bb:
; CHECK-NEXT: br label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: br label [[BB2]]
; CHECK: bb2:
-; CHECK-NEXT: [[E_IN:%.*]] = phi i32* [ [[B:%.*]], [[BB]] ], [ [[A]], [[BB1]] ]
-; CHECK-NEXT: [[E:%.*]] = load i32, i32* [[E_IN]], align 16
+; CHECK-NEXT: [[E_IN:%.*]] = phi ptr [ [[B:%.*]], [[BB]] ], [ [[A]], [[BB1]] ]
+; CHECK-NEXT: [[E:%.*]] = load i32, ptr [[E_IN]], align 16
; CHECK-NEXT: ret i32 [[E]]
;
entry:
- %c = icmp eq i32* %A, null
+ %c = icmp eq ptr %A, null
br i1 %c, label %bb1, label %bb
bb:
- %C = load i32, i32* %B, align 16
+ %C = load i32, ptr %B, align 16
br label %bb2
bb1:
- %D = load i32, i32* %A, align 32
+ %D = load i32, ptr %A, align 32
br label %bb2
bb2:
%E = phi i32 [ %C, %bb ], [ %D, %bb1 ]
ret i32 %E
}
;
entry:
%a = alloca i32
- %i = ptrtoint i32* %a to i64
+ %i = ptrtoint ptr %a to i64
%b = call i1 @test11a()
br i1 %b, label %one, label %two
%f = phi i64 [ %x, %one], [%y, %two]
; Change the %f to %i, and the optimizer suddenly becomes a lot smarter
; even though %f must equal %i at this point
- %g = inttoptr i64 %f to i32*
- store i32 10, i32* %g
+ %g = inttoptr i64 %f to ptr
+ store i32 10, ptr %g
%z = call i1 @test11a()
ret i1 %z
}
-define i64 @test12(i1 %cond, i8* %Ptr, i64 %Val) {
+define i64 @test12(i1 %cond, ptr %Ptr, i64 %Val) {
; CHECK-LABEL: @test12(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[COND:%.*]], label [[END:%.*]], label [[TWO:%.*]]
; CHECK-NEXT: br label [[END]]
; CHECK: end:
; CHECK-NEXT: [[T869_0_OFF64:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[VAL:%.*]], [[TWO]] ]
-; CHECK-NEXT: [[T41:%.*]] = ptrtoint i8* [[PTR:%.*]] to i64
+; CHECK-NEXT: [[T41:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
; CHECK-NEXT: [[T2:%.*]] = add i64 [[T869_0_OFF64]], [[T41]]
; CHECK-NEXT: ret i64 [[T2]]
;
entry:
- %t41 = ptrtoint i8* %Ptr to i64
+ %t41 = ptrtoint ptr %Ptr to i64
%t42 = zext i64 %t41 to i128
br i1 %cond, label %end, label %two
}
; PR6512 - Shouldn't merge loads from different addr spaces.
-define i32 @test16(i32 addrspace(1)* %pointer1, i32 %flag, i32* %pointer2)
+define i32 @test16(ptr addrspace(1) %pointer1, i32 %flag, ptr %pointer2)
; CHECK-LABEL: @test16(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[POINTER1_ADDR:%.*]] = alloca i32 addrspace(1)*, align 8
-; CHECK-NEXT: [[POINTER2_ADDR:%.*]] = alloca i32*, align 8
-; CHECK-NEXT: store i32 addrspace(1)* [[POINTER1:%.*]], i32 addrspace(1)** [[POINTER1_ADDR]], align 8
-; CHECK-NEXT: store i32* [[POINTER2:%.*]], i32** [[POINTER2_ADDR]], align 8
+; CHECK-NEXT: [[POINTER1_ADDR:%.*]] = alloca ptr addrspace(1), align 8
+; CHECK-NEXT: [[POINTER2_ADDR:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: store ptr addrspace(1) [[POINTER1:%.*]], ptr [[POINTER1_ADDR]], align 8
+; CHECK-NEXT: store ptr [[POINTER2:%.*]], ptr [[POINTER2_ADDR]], align 8
; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[FLAG:%.*]], 0
; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[IF_ELSE:%.*]], label [[IF_THEN:%.*]]
; CHECK: return:
-; CHECK-NEXT: [[T7:%.*]] = load i32, i32* [[RETVAL]], align 4
+; CHECK-NEXT: [[T7:%.*]] = load i32, ptr [[RETVAL]], align 4
; CHECK-NEXT: ret i32 [[T7]]
; CHECK: if.end:
; CHECK-NEXT: [[STOREMERGE:%.*]] = phi i32 [ [[T5:%.*]], [[IF_ELSE]] ], [ [[T2:%.*]], [[IF_THEN]] ]
-; CHECK-NEXT: store i32 [[STOREMERGE]], i32* [[RETVAL]], align 4
+; CHECK-NEXT: store i32 [[STOREMERGE]], ptr [[RETVAL]], align 4
; CHECK-NEXT: br label [[RETURN:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: [[T1:%.*]] = load i32 addrspace(1)*, i32 addrspace(1)** [[POINTER1_ADDR]], align 8
-; CHECK-NEXT: [[T2]] = load i32, i32 addrspace(1)* [[T1]], align 4
+; CHECK-NEXT: [[T1:%.*]] = load ptr addrspace(1), ptr [[POINTER1_ADDR]], align 8
+; CHECK-NEXT: [[T2]] = load i32, ptr addrspace(1) [[T1]], align 4
; CHECK-NEXT: br label [[IF_END:%.*]]
; CHECK: if.else:
-; CHECK-NEXT: [[T3:%.*]] = load i32*, i32** [[POINTER2_ADDR]], align 8
-; CHECK-NEXT: [[T5]] = load i32, i32* [[T3]], align 4
+; CHECK-NEXT: [[T3:%.*]] = load ptr, ptr [[POINTER2_ADDR]], align 8
+; CHECK-NEXT: [[T5]] = load i32, ptr [[T3]], align 4
; CHECK-NEXT: br label [[IF_END]]
;
nounwind {
entry:
- %retval = alloca i32, align 4 ; <i32*> [#uses=2]
- %pointer1.addr = alloca i32 addrspace(1)*, align 4 ; <i32 addrspace(1)**>
- %flag.addr = alloca i32, align 4 ; <i32*> [#uses=2]
- %pointer2.addr = alloca i32*, align 4 ; <i32**> [#uses=2]
- %res = alloca i32, align 4 ; <i32*> [#uses=4]
- store i32 addrspace(1)* %pointer1, i32 addrspace(1)** %pointer1.addr
- store i32 %flag, i32* %flag.addr
- store i32* %pointer2, i32** %pointer2.addr
- store i32 10, i32* %res
- %t = load i32, i32* %flag.addr ; <i32> [#uses=1]
+ %retval = alloca i32, align 4 ; <ptr> [#uses=2]
+ %pointer1.addr = alloca ptr addrspace(1), align 4 ; <ptr>
+ %flag.addr = alloca i32, align 4 ; <ptr> [#uses=2]
+ %pointer2.addr = alloca ptr, align 4 ; <ptr> [#uses=2]
+ %res = alloca i32, align 4 ; <ptr> [#uses=4]
+ store ptr addrspace(1) %pointer1, ptr %pointer1.addr
+ store i32 %flag, ptr %flag.addr
+ store ptr %pointer2, ptr %pointer2.addr
+ store i32 10, ptr %res
+ %t = load i32, ptr %flag.addr ; <i32> [#uses=1]
%tobool = icmp ne i32 %t, 0 ; <i1> [#uses=1]
br i1 %tobool, label %if.then, label %if.else
return: ; preds = %if.end
- %t7 = load i32, i32* %retval ; <i32> [#uses=1]
+ %t7 = load i32, ptr %retval ; <i32> [#uses=1]
ret i32 %t7
if.end: ; preds = %if.else, %if.then
- %t6 = load i32, i32* %res ; <i32> [#uses=1]
- store i32 %t6, i32* %retval
+ %t6 = load i32, ptr %res ; <i32> [#uses=1]
+ store i32 %t6, ptr %retval
br label %return
if.then: ; preds = %entry
- %t1 = load i32 addrspace(1)*, i32 addrspace(1)** %pointer1.addr ; <i32 addrspace(1)*>
- %arrayidx = getelementptr i32, i32 addrspace(1)* %t1, i32 0 ; <i32 addrspace(1)*> [#uses=1]
- %t2 = load i32, i32 addrspace(1)* %arrayidx ; <i32> [#uses=1]
- store i32 %t2, i32* %res
+ %t1 = load ptr addrspace(1), ptr %pointer1.addr ; <ptr addrspace(1)>
+ %arrayidx = getelementptr i32, ptr addrspace(1) %t1, i32 0 ; <ptr addrspace(1)> [#uses=1]
+ %t2 = load i32, ptr addrspace(1) %arrayidx ; <i32> [#uses=1]
+ store i32 %t2, ptr %res
br label %if.end
if.else: ; preds = %entry
- %t3 = load i32*, i32** %pointer2.addr ; <i32*> [#uses=1]
- %arrayidx4 = getelementptr i32, i32* %t3, i32 0 ; <i32*> [#uses=1]
- %t5 = load i32, i32* %arrayidx4 ; <i32> [#uses=1]
- store i32 %t5, i32* %res
+ %t3 = load ptr, ptr %pointer2.addr ; <ptr> [#uses=1]
+ %arrayidx4 = getelementptr i32, ptr %t3, i32 0 ; <ptr> [#uses=1]
+ %t5 = load i32, ptr %arrayidx4 ; <i32> [#uses=1]
+ store i32 %t5, ptr %res
br label %if.end
}
}
; Atomic and non-atomic loads should not be combined.
-define i32 @PR51435(i32* %ptr, i32* %atomic_ptr, i1 %c) {
+define i32 @PR51435(ptr %ptr, ptr %atomic_ptr, i1 %c) {
; CHECK-LABEL: @PR51435(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[X:%.*]] = load i32, i32* [[PTR:%.*]], align 4
+; CHECK-NEXT: [[X:%.*]] = load i32, ptr [[PTR:%.*]], align 4
; CHECK-NEXT: br i1 [[C:%.*]], label [[IF:%.*]], label [[END:%.*]]
; CHECK: if:
-; CHECK-NEXT: [[Y:%.*]] = load atomic i32, i32* [[ATOMIC_PTR:%.*]] acquire, align 4
+; CHECK-NEXT: [[Y:%.*]] = load atomic i32, ptr [[ATOMIC_PTR:%.*]] acquire, align 4
; CHECK-NEXT: br label [[END]]
; CHECK: end:
; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[X]], [[ENTRY:%.*]] ], [ [[Y]], [[IF]] ]
; CHECK-NEXT: ret i32 [[COND]]
;
entry:
- %x = load i32, i32* %ptr, align 4
+ %x = load i32, ptr %ptr, align 4
br i1 %c, label %if, label %end
if:
- %y = load atomic i32, i32* %atomic_ptr acquire, align 4
+ %y = load atomic i32, ptr %atomic_ptr acquire, align 4
br label %end
end:
false:
br label %ret
ret:
- %ptr = phi i32* [ %zero, %true ] , [ %one, %false ]
- %isnull = icmp eq i32* %ptr, null
+ %ptr = phi ptr [ %zero, %true ] , [ %one, %false ]
+ %isnull = icmp eq ptr %ptr, null
ret i1 %isnull
}
false:
br label %ret
ret:
- %p = phi i32* [ %a, %true ], [ %b, %false ]
- %r = icmp eq i32* %p, %c
+ %p = phi ptr [ %a, %true ], [ %b, %false ]
+ %r = icmp eq ptr %p, %c
ret i1 %r
}
false:
br label %loop
loop:
- %p = phi i32* [ %a, %true ], [ %b, %false ], [ %p, %loop ]
- %r = icmp eq i32* %p, %c
+ %p = phi ptr [ %a, %true ], [ %b, %false ], [ %p, %loop ]
+ %r = icmp eq ptr %p, %c
br i1 %c2, label %ret, label %loop
ret:
ret i1 %r
ret void
}
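; The add is pushed through the phi: add(phi(%A, 42), 19) becomes
; phi(%A + 19, 42 + 19), i.e. the constant 61 in the checks below.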
-define i32 @test23(i32 %A, i1 %pb, i32 * %P) {
+define i32 @test23(i32 %A, i1 %pb, ptr %P) {
; CHECK-LABEL: @test23(
; CHECK-NEXT: BB0:
; CHECK-NEXT: [[PHI_BO:%.*]] = add i32 [[A:%.*]], 19
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: Loop:
; CHECK-NEXT: [[B:%.*]] = phi i32 [ [[PHI_BO]], [[BB0:%.*]] ], [ 61, [[LOOP]] ]
-; CHECK-NEXT: store i32 [[B]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[B]], ptr [[P:%.*]], align 4
; CHECK-NEXT: br i1 [[PB:%.*]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK: Exit:
; CHECK-NEXT: ret i32 [[B]]
; The PHI always has the same value.
%B = phi i32 [ %A, %BB0 ], [ 42, %Loop ]
%D = add i32 %B, 19
- store i32 %D, i32* %P
+ store i32 %D, ptr %P
br i1 %pb, label %Loop, label %Exit
Exit: ; preds = %Loop
;
entry:
%a = alloca i32
- %i = ptrtoint i32* %a to i64
+ %i = ptrtoint ptr %a to i64
%b = call i1 @test25a()
br i1 %b, label %one, label %two
%f = phi i64 [ %x, %one], [%y, %two]
; Change the %f to %i, and the optimizer suddenly becomes a lot smarter
; even though %f must equal %i at this point
- %g = inttoptr i64 %f to i32*
- store i32 10, i32* %g
+ %g = inttoptr i64 %f to ptr
+ store i32 10, ptr %g
%z = call i1 @test25a()
ret i1 %z
}
;
entry:
%a = alloca i32
- %i = ptrtoint i32* %a to i64
+ %i = ptrtoint ptr %a to i64
%b = call i1 @test26a()
br label %one
%f = phi i64 [ %x, %one], [%y, %two]
; Change the %f to %i, and the optimizer suddenly becomes a lot smarter
; even though %f must equal %i at this point
- %g = inttoptr i64 %f to i32*
- store i32 10, i32* %g
+ %g = inttoptr i64 %f to ptr
+ store i32 10, ptr %g
%z = call i1 @test26a()
ret i1 %z
}
ret i1 %cmp1
}
-define i1 @phi_allnonzerononconstant(i1 %c, i32 %a, i32* nonnull %b1, i32* nonnull %b2) {
+define i1 @phi_allnonzerononconstant(i1 %c, i32 %a, ptr nonnull %b1, ptr nonnull %b2) {
; CHECK-LABEL: @phi_allnonzerononconstant(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
br label %if.end
if.end: ; preds = %if.else, %if.then
- %x.0 = phi i32* [ %b1, %if.then ], [ %b2, %if.else ]
- %cmp1 = icmp eq i32* %x.0, null
+ %x.0 = phi ptr [ %b1, %if.then ], [ %b2, %if.else ]
+ %cmp1 = icmp eq ptr %x.0, null
ret i1 %cmp1
}
declare void @dummy()
-define i1 @phi_knownnonzero_eq(i32 %n, i32 %s, i32* nocapture readonly %P) {
+define i1 @phi_knownnonzero_eq(i32 %n, i32 %s, ptr nocapture readonly %P) {
; CHECK-LABEL: @phi_knownnonzero_eq(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp slt i32 [[N:%.*]], [[S:%.*]]
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
- %0 = load i32, i32* %P
+ %0 = load i32, ptr %P
%cmp = icmp eq i32 %n, %0
%1 = select i1 %cmp, i32 1, i32 2
br label %if.end
ret i1 %cmp1
}
-define i1 @phi_knownnonzero_ne(i32 %n, i32 %s, i32* nocapture readonly %P) {
+define i1 @phi_knownnonzero_ne(i32 %n, i32 %s, ptr nocapture readonly %P) {
; CHECK-LABEL: @phi_knownnonzero_ne(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp slt i32 [[N:%.*]], [[S:%.*]]
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
- %0 = load i32, i32* %P
+ %0 = load i32, ptr %P
%cmp = icmp eq i32 %n, %0
%1 = select i1 %cmp, i32 1, i32 2
br label %if.end
ret i1 %cmp1
}
-define i1 @phi_knownnonzero_eq_2(i32 %n, i32 %s, i32* nocapture readonly %P) {
+define i1 @phi_knownnonzero_eq_2(i32 %n, i32 %s, ptr nocapture readonly %P) {
; CHECK-LABEL: @phi_knownnonzero_eq_2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp slt i32 [[N:%.*]], [[S:%.*]]
br i1 %tobool2, label %if.else, label %if.end
if.else: ; preds = %entry
- %0 = load i32, i32* %P
+ %0 = load i32, ptr %P
%cmp = icmp eq i32 %n, %0
%1 = select i1 %cmp, i32 1, i32 2
br label %if.end
ret i1 %cmp1
}
-define i1 @phi_knownnonzero_ne_2(i32 %n, i32 %s, i32* nocapture readonly %P) {
+define i1 @phi_knownnonzero_ne_2(i32 %n, i32 %s, ptr nocapture readonly %P) {
; CHECK-LABEL: @phi_knownnonzero_ne_2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp slt i32 [[N:%.*]], [[S:%.*]]
br i1 %tobool2, label %if.else, label %if.end
if.else: ; preds = %entry
- %0 = load i32, i32* %P
+ %0 = load i32, ptr %P
%cmp = icmp eq i32 %n, %0
%1 = select i1 %cmp, i32 1, i32 2
br label %if.end
br label %for.cond
for.end:
- store double %p, double* undef
+ store double %p, ptr undef
ret void
}
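; Negative test for folding an icmp of a loop phi: the checks verify that
; the loop body and the final compare are left essentially unchanged.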
-define i1 @pr57488_icmp_of_phi(i64* %ptr.base, i64 %len) {
+define i1 @pr57488_icmp_of_phi(ptr %ptr.base, i64 %len) {
; CHECK-LABEL: @pr57488_icmp_of_phi(
; CHECK-NEXT: start:
-; CHECK-NEXT: [[END:%.*]] = getelementptr inbounds i64, i64* [[PTR_BASE:%.*]], i64 [[LEN:%.*]]
+; CHECK-NEXT: [[END:%.*]] = getelementptr inbounds i64, ptr [[PTR_BASE:%.*]], i64 [[LEN:%.*]]
; CHECK-NEXT: [[LEN_ZERO:%.*]] = icmp eq i64 [[LEN]], 0
; CHECK-NEXT: br i1 [[LEN_ZERO]], label [[EXIT:%.*]], label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[ACCUM:%.*]] = phi i8 [ [[ACCUM_NEXT:%.*]], [[LOOP]] ], [ 1, [[START:%.*]] ]
-; CHECK-NEXT: [[PTR:%.*]] = phi i64* [ [[PTR_NEXT:%.*]], [[LOOP]] ], [ [[PTR_BASE]], [[START]] ]
-; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i64, i64* [[PTR]], i64 1
+; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[PTR_NEXT:%.*]], [[LOOP]] ], [ [[PTR_BASE]], [[START]] ]
+; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i64, ptr [[PTR]], i64 1
; CHECK-NEXT: [[ACCUM_BOOL:%.*]] = icmp ne i8 [[ACCUM]], 0
-; CHECK-NEXT: [[VAL:%.*]] = load i64, i64* [[PTR]], align 8
+; CHECK-NEXT: [[VAL:%.*]] = load i64, ptr [[PTR]], align 8
; CHECK-NEXT: [[VAL_ZERO:%.*]] = icmp eq i64 [[VAL]], 0
; CHECK-NEXT: [[AND:%.*]] = and i1 [[ACCUM_BOOL]], [[VAL_ZERO]]
; CHECK-NEXT: [[ACCUM_NEXT]] = zext i1 [[AND]] to i8
-; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp eq i64* [[PTR_NEXT]], [[END]]
+; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp eq ptr [[PTR_NEXT]], [[END]]
; CHECK-NEXT: br i1 [[EXIT_COND]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ true, [[START]] ], [ [[AND]], [[LOOP]] ]
; CHECK-NEXT: ret i1 [[RES]]
;
start:
- %end = getelementptr inbounds i64, i64* %ptr.base, i64 %len
+ %end = getelementptr inbounds i64, ptr %ptr.base, i64 %len
%len.zero = icmp eq i64 %len, 0
br i1 %len.zero, label %exit, label %loop
loop:
%accum = phi i8 [ %accum.next, %loop ], [ 1, %start ]
- %ptr = phi i64* [ %ptr.next, %loop ], [ %ptr.base, %start ]
- %ptr.next = getelementptr inbounds i64, i64* %ptr, i64 1
+ %ptr = phi ptr [ %ptr.next, %loop ], [ %ptr.base, %start ]
+ %ptr.next = getelementptr inbounds i64, ptr %ptr, i64 1
%accum.bool = icmp ne i8 %accum, 0
- %val = load i64, i64* %ptr, align 8
+ %val = load i64, ptr %ptr, align 8
%val.zero = icmp eq i64 %val, 0
%and = and i1 %accum.bool, %val.zero
%accum.next = zext i1 %and to i8
- %exit.cond = icmp eq i64* %ptr.next, %end
+ %exit.cond = icmp eq ptr %ptr.next, %end
br i1 %exit.cond, label %exit, label %loop
exit:
ret fp128 %pow
}
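; pow(exp(x), y) is normally folded to exp(x * y), but in the tests below
; the exp() result has a second use (the store), so both calls are kept.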
-define float @reuse_fast(float %x, float %y, float * %p) {
+define float @reuse_fast(float %x, float %y, ptr %p) {
; CHECK-LABEL: @reuse_fast(
; CHECK-NEXT: [[EXP:%.*]] = call fast float @expf(float [[X:%.*]])
; CHECK-NEXT: [[POW:%.*]] = call fast float @powf(float [[EXP]], float [[Y:%.*]])
-; CHECK-NEXT: store float [[EXP]], float* [[P:%.*]], align 4
+; CHECK-NEXT: store float [[EXP]], ptr [[P:%.*]], align 4
; CHECK-NEXT: ret float [[POW]]
;
%exp = call fast float @expf(float %x)
%pow = call fast float @powf(float %exp, float %y)
- store float %exp, float *%p, align 4
+ store float %exp, ptr %p, align 4
ret float %pow
}
-define fp128 @reuse_libcall(fp128 %x, fp128 %y, fp128 * %p) {
+define fp128 @reuse_libcall(fp128 %x, fp128 %y, ptr %p) {
; CHECK-LABEL: @reuse_libcall(
; CHECK-NEXT: [[EXP:%.*]] = call fp128 @expl(fp128 [[X:%.*]])
; CHECK-NEXT: [[POW:%.*]] = call fp128 @powl(fp128 [[EXP]], fp128 [[Y:%.*]])
-; CHECK-NEXT: store fp128 [[EXP]], fp128* [[P:%.*]], align 16
+; CHECK-NEXT: store fp128 [[EXP]], ptr [[P:%.*]], align 16
; CHECK-NEXT: ret fp128 [[POW]]
;
%exp = call fp128 @expl(fp128 %x)
%pow = call fp128 @powl(fp128 %exp, fp128 %y)
- store fp128 %exp, fp128 *%p, align 16
+ store fp128 %exp, ptr %p, align 16
ret fp128 %pow
}
-define double @function_pointer(double ()* %fptr, double %p1) {
+define double @function_pointer(ptr %fptr, double %p1) {
; CHECK-LABEL: @function_pointer(
; CHECK-NEXT: [[CALL1:%.*]] = call fast double [[FPTR:%.*]]()
; CHECK-NEXT: [[POW:%.*]] = call fast double @llvm.pow.f64(double [[CALL1]], double [[P1:%.*]])
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
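; The 'and i8 %a, 1' mask is dropped: the !range metadata on the load
; (presumably restricting %a to 0 or 1) makes the low-bit mask redundant, so
; the compare can test %a directly.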
-define zeroext i1 @_Z3fooPb(i8* nocapture %x) {
+define zeroext i1 @_Z3fooPb(ptr nocapture %x) {
entry:
- %a = load i8, i8* %x, align 1, !range !0
+ %a = load i8, ptr %x, align 1, !range !0
%b = and i8 %a, 1
%tobool = icmp ne i8 %b, 0
ret i1 %tobool
}
-; CHECK: %a = load i8, i8* %x, align 1, !range !0
+; CHECK: %a = load i8, ptr %x, align 1, !range !0
; CHECK-NEXT: %tobool = icmp ne i8 %a, 0
; CHECK-NEXT: ret i1 %tobool
define i1 @f(i8 zeroext %p) #1 {
; CHECK-NOT: ret i1 false
%1 = zext i8 %p to i32
- %2 = load i32, i32* @d, align 4
+ %2 = load i32, ptr @d, align 4
%3 = or i32 %2, -2
%4 = add nsw i32 %3, %1
%5 = icmp ugt i32 %1, %4
define void @fn3() {
; CHECK: @fn3
bb:
- %tmp = load i32, i32* @c, align 4
+ %tmp = load i32, ptr @c, align 4
%tmp1 = icmp eq i32 %tmp, 0
br i1 %tmp1, label %bb2, label %bb6
bb2: ; preds = %bb
- %tmp3 = load i32, i32* @b, align 4
+ %tmp3 = load i32, ptr @b, align 4
%tmp.i = add nsw i32 255, %tmp3
%tmp5 = icmp ugt i32 %tmp.i, 254
br label %bb6
%tmp7 = phi i1 [ true, %bb ], [ %tmp5, %bb2 ]
%tmp8 = zext i1 %tmp7 to i32
%tmp10 = icmp eq i32 %tmp8, 0
- %tmp12 = load i16, i16* @a, align 2
+ %tmp12 = load i16, ptr @a, align 2
%tmp14 = icmp ne i16 %tmp12, 0
%tmp16 = select i1 %tmp10, i1 false, i1 %tmp14
%tmp17 = zext i1 %tmp16 to i32
- store i32 %tmp17, i32* @d, align 4
+ store i32 %tmp17, ptr @d, align 4
ret void
}
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-define i1 @f(i8* %a, i8 %b) {
+define i1 @f(ptr %a, i8 %b) {
; CHECK-LABEL: @f(
entry:
%or = or i8 %b, -117
%sub = add i8 %or, -1
- store i8 %sub, i8* %a, align 1
+ store i8 %sub, ptr %a, align 1
%cmp = icmp ugt i8 %or, %sub
ret i1 %cmp
; CHECK: ret i1 true
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[I_0]], [[N:%.*]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[TMP2:%.*]] = load float, float* getelementptr inbounds (%"struct.std::complex", %"struct.std::complex"* @dd, i64 0, i32 0, i32 0), align 4
-; CHECK-NEXT: [[TMP3:%.*]] = load float, float* getelementptr inbounds (%"struct.std::complex", %"struct.std::complex"* @dd, i64 0, i32 0, i32 1), align 4
-; CHECK-NEXT: [[TMP4:%.*]] = load float, float* getelementptr inbounds (%"struct.std::complex", %"struct.std::complex"* @dd2, i64 0, i32 0, i32 0), align 4
-; CHECK-NEXT: [[TMP5:%.*]] = load float, float* getelementptr inbounds (%"struct.std::complex", %"struct.std::complex"* @dd2, i64 0, i32 0, i32 1), align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr @dd, align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr getelementptr inbounds (%"struct.std::complex", ptr @dd, i64 0, i32 0, i32 1), align 4
+; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr @dd2, align 4
+; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr getelementptr inbounds (%"struct.std::complex", ptr @dd2, i64 0, i32 0, i32 1), align 4
; CHECK-NEXT: [[MUL_I:%.*]] = fmul float [[TMP2]], [[TMP4]]
; CHECK-NEXT: [[MUL4_I:%.*]] = fmul float [[TMP3]], [[TMP5]]
; CHECK-NEXT: [[SUB_I:%.*]] = fsub float [[MUL_I]], [[MUL4_I]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_0]], 1
; CHECK-NEXT: br label [[FOR_COND]]
; CHECK: for.end:
-; CHECK-NEXT: store float [[TMP0]], float* getelementptr inbounds (%"struct.std::complex", %"struct.std::complex"* @dd, i64 0, i32 0, i32 0), align 4
-; CHECK-NEXT: store float [[TMP1]], float* getelementptr inbounds (%"struct.std::complex", %"struct.std::complex"* @dd, i64 0, i32 0, i32 1), align 4
+; CHECK-NEXT: store float [[TMP0]], ptr @dd, align 4
+; CHECK-NEXT: store float [[TMP1]], ptr getelementptr inbounds (%"struct.std::complex", ptr @dd, i64 0, i32 0, i32 1), align 4
; CHECK-NEXT: ret void
;
entry:
br i1 %cmp, label %for.body, label %for.end
for.body:
- %0 = load float, float* getelementptr inbounds (%"struct.std::complex", %"struct.std::complex"* @dd, i64 0, i32 0, i32 0), align 4
- %1 = load float, float* getelementptr inbounds (%"struct.std::complex", %"struct.std::complex"* @dd, i64 0, i32 0, i32 1), align 4
- %2 = load float, float* getelementptr inbounds (%"struct.std::complex", %"struct.std::complex"* @dd2, i64 0, i32 0, i32 0), align 4
- %3 = load float, float* getelementptr inbounds (%"struct.std::complex", %"struct.std::complex"* @dd2, i64 0, i32 0, i32 1), align 4
+ %0 = load float, ptr @dd, align 4
+ %1 = load float, ptr getelementptr inbounds (%"struct.std::complex", ptr @dd, i64 0, i32 0, i32 1), align 4
+ %2 = load float, ptr @dd2, align 4
+ %3 = load float, ptr getelementptr inbounds (%"struct.std::complex", ptr @dd2, i64 0, i32 0, i32 1), align 4
%mul.i = fmul float %0, %2
%mul4.i = fmul float %1, %3
%sub.i = fsub float %mul.i, %mul4.i
br label %for.cond
for.end:
- store i32 %ldd.sroa.0.0, i32* bitcast (%"struct.std::complex"* @dd to i32*), align 4
- store i32 %ldd.sroa.6.0, i32* bitcast (float* getelementptr inbounds (%"struct.std::complex", %"struct.std::complex"* @dd, i64 0, i32 0, i32 1) to i32*), align 4
+ store i32 %ldd.sroa.0.0, ptr @dd, align 4
+ store i32 %ldd.sroa.6.0, ptr getelementptr inbounds (%"struct.std::complex", ptr @dd, i64 0, i32 0, i32 1), align 4
ret void
}
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[I_0]], [[N:%.*]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[TMP1:%.*]] = load float, float* getelementptr inbounds (%"struct.std::complex", %"struct.std::complex"* @dd, i64 0, i32 0, i32 0), align 4
-; CHECK-NEXT: [[TMP2:%.*]] = load float, float* getelementptr inbounds (%"struct.std::complex", %"struct.std::complex"* @dd, i64 0, i32 0, i32 1), align 4
-; CHECK-NEXT: [[TMP3:%.*]] = load float, float* getelementptr inbounds (%"struct.std::complex", %"struct.std::complex"* @dd2, i64 0, i32 0, i32 0), align 4
-; CHECK-NEXT: [[TMP4:%.*]] = load float, float* getelementptr inbounds (%"struct.std::complex", %"struct.std::complex"* @dd2, i64 0, i32 0, i32 1), align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr @dd, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr getelementptr inbounds (%"struct.std::complex", ptr @dd, i64 0, i32 0, i32 1), align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr @dd2, align 4
+; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr getelementptr inbounds (%"struct.std::complex", ptr @dd2, i64 0, i32 0, i32 1), align 4
; CHECK-NEXT: [[MUL_I:%.*]] = fmul float [[TMP1]], [[TMP3]]
; CHECK-NEXT: [[MUL4_I:%.*]] = fmul float [[TMP2]], [[TMP4]]
; CHECK-NEXT: [[SUB_I:%.*]] = fsub float [[MUL_I]], [[MUL4_I]]
; CHECK-NEXT: [[TMP6]] = phi float [ [[ADD_I]], [[FOR_BODY]] ], [ [[TMP5]], [[EVEN_BB]] ]
; CHECK-NEXT: br label [[FOR_COND]]
; CHECK: for.end:
-; CHECK-NEXT: store float [[TMP0]], float* getelementptr inbounds (%"struct.std::complex", %"struct.std::complex"* @dd, i64 0, i32 0, i32 0), align 4
+; CHECK-NEXT: store float [[TMP0]], ptr @dd, align 4
; CHECK-NEXT: ret void
;
entry:
br i1 %cmp, label %for.body, label %for.end
for.body:
- %0 = load float, float* getelementptr inbounds (%"struct.std::complex", %"struct.std::complex"* @dd, i64 0, i32 0, i32 0), align 4
- %1 = load float, float* getelementptr inbounds (%"struct.std::complex", %"struct.std::complex"* @dd, i64 0, i32 0, i32 1), align 4
- %2 = load float, float* getelementptr inbounds (%"struct.std::complex", %"struct.std::complex"* @dd2, i64 0, i32 0, i32 0), align 4
- %3 = load float, float* getelementptr inbounds (%"struct.std::complex", %"struct.std::complex"* @dd2, i64 0, i32 0, i32 1), align 4
+ %0 = load float, ptr @dd, align 4
+ %1 = load float, ptr getelementptr inbounds (%"struct.std::complex", ptr @dd, i64 0, i32 0, i32 1), align 4
+ %2 = load float, ptr @dd2, align 4
+ %3 = load float, ptr getelementptr inbounds (%"struct.std::complex", ptr @dd2, i64 0, i32 0, i32 1), align 4
%mul.i = fmul float %0, %2
%mul4.i = fmul float %1, %3
%sub.i = fsub float %mul.i, %mul4.i
br label %for.cond
for.end:
- store i32 %ldd.sroa.0.0, i32* bitcast (%"struct.std::complex"* @dd to i32*), align 4
+ store i32 %ldd.sroa.0.0, ptr @dd, align 4
ret void
}
; Instcombine should be able to prove that none of the
; insertelement's first operand's elements are needed.
-define internal void @""(i8*) {
+define internal void @""(ptr) {
; <label>:1
- bitcast i8* %0 to i32* ; <i32*>:2 [#uses=1]
- load i32, i32* %2, align 1 ; <i32>:3 [#uses=1]
- getelementptr i8, i8* %0, i32 4 ; <i8*>:4 [#uses=1]
- bitcast i8* %4 to i32* ; <i32*>:5 [#uses=1]
- load i32, i32* %5, align 1 ; <i32>:6 [#uses=1]
+ bitcast ptr %0 to ptr ; <ptr>:2 [#uses=1]
+ load i32, ptr %2, align 1 ; <i32>:3 [#uses=1]
+ getelementptr i8, ptr %0, i32 4 ; <ptr>:4 [#uses=1]
+ bitcast ptr %4 to ptr ; <ptr>:5 [#uses=1]
+ load i32, ptr %5, align 1 ; <i32>:6 [#uses=1]
br label %7
; <label>:7 ; preds = %9, %1
sitofp i32 %.0 to float ; <float>:10 [#uses=1]
insertelement <4 x float> %.01, float %10, i32 0 ; <<4 x float>>:11 [#uses=1]
shufflevector <4 x float> %11, <4 x float> poison, <4 x i32> zeroinitializer ; <<4 x float>>:12 [#uses=2]
- getelementptr i8, i8* %0, i32 48 ; <i8*>:13 [#uses=1]
- bitcast i8* %13 to <4 x float>* ; <<4 x float>*>:14 [#uses=1]
- store <4 x float> %12, <4 x float>* %14, align 16
+ getelementptr i8, ptr %0, i32 48 ; <ptr>:13 [#uses=1]
+ bitcast ptr %13 to ptr ; <ptr>:14 [#uses=1]
+ store <4 x float> %12, ptr %14, align 16
add i32 %.0, 2 ; <i32>:15 [#uses=1]
br label %7
; Instcombine should be able to prove that none of the
; insertelement's first operand's elements are needed.
-define internal void @""(i8*) {
+define internal void @""(ptr) {
; <label>:1
- bitcast i8* %0 to i32* ; <i32*>:2 [#uses=1]
- load i32, i32* %2, align 1 ; <i32>:3 [#uses=1]
- getelementptr i8, i8* %0, i32 4 ; <i8*>:4 [#uses=1]
- bitcast i8* %4 to i32* ; <i32*>:5 [#uses=1]
- load i32, i32* %5, align 1 ; <i32>:6 [#uses=1]
+ bitcast ptr %0 to ptr ; <ptr>:2 [#uses=1]
+ load i32, ptr %2, align 1 ; <i32>:3 [#uses=1]
+ getelementptr i8, ptr %0, i32 4 ; <ptr>:4 [#uses=1]
+ bitcast ptr %4 to ptr ; <ptr>:5 [#uses=1]
+ load i32, ptr %5, align 1 ; <i32>:6 [#uses=1]
br label %7
; <label>:7 ; preds = %9, %1
sitofp i32 %.0 to float ; <float>:10 [#uses=1]
insertelement <4 x float> %.01, float %10, i32 0 ; <<4 x float>>:11 [#uses=1]
shufflevector <4 x float> %11, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:12 [#uses=2]
- getelementptr i8, i8* %0, i32 48 ; <i8*>:13 [#uses=1]
- bitcast i8* %13 to <4 x float>* ; <<4 x float>*>:14 [#uses=1]
- store <4 x float> %12, <4 x float>* %14, align 16
+ getelementptr i8, ptr %0, i32 48 ; <ptr>:13 [#uses=1]
+ bitcast ptr %13 to ptr ; <ptr>:14 [#uses=1]
+ store <4 x float> %12, ptr %14, align 16
add i32 %.0, 2 ; <i32>:15 [#uses=1]
br label %7
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
target triple = "x86_64-pc-windows-msvc"
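; The chained GEPs are merged: gep inbounds (gep %p, 1), 1 becomes a single
; gep %p, 2. The inbounds flag is dropped because the first GEP in the chain
; lacked it.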
-define i1 @test1(i8* %p) personality i32 (...)* @__CxxFrameHandler3 {
+define i1 @test1(ptr %p) personality ptr @__CxxFrameHandler3 {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A:%.*]] = getelementptr i8, i8* [[P:%.*]], i64 1
+; CHECK-NEXT: [[A:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 1
; CHECK-NEXT: invoke void @may_throw()
; CHECK-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[CATCH_DISPATCH:%.*]]
; CHECK: invoke.cont:
-; CHECK-NEXT: [[B:%.*]] = getelementptr i8, i8* [[P]], i64 2
+; CHECK-NEXT: [[B:%.*]] = getelementptr i8, ptr [[P]], i64 2
; CHECK-NEXT: invoke void @may_throw()
; CHECK-NEXT: to label [[EXIT:%.*]] unwind label [[CATCH_DISPATCH]]
; CHECK: catch.dispatch:
-; CHECK-NEXT: [[C:%.*]] = phi i8* [ [[B]], [[INVOKE_CONT]] ], [ [[A]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[C:%.*]] = phi ptr [ [[B]], [[INVOKE_CONT]] ], [ [[A]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[TMP1:%.*]] = catchswitch within none [label %catch] unwind to caller
; CHECK: catch:
-; CHECK-NEXT: [[TMP2:%.*]] = catchpad within [[TMP1]] [i8* null, i32 64, i8* null]
+; CHECK-NEXT: [[TMP2:%.*]] = catchpad within [[TMP1]] [ptr null, i32 64, ptr null]
; CHECK-NEXT: catchret from [[TMP2]] to label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: [[D:%.*]] = phi i8* [ [[A]], [[INVOKE_CONT]] ], [ [[C]], [[CATCH:%.*]] ]
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8* [[D]], [[A]]
+; CHECK-NEXT: [[D:%.*]] = phi ptr [ [[A]], [[INVOKE_CONT]] ], [ [[C]], [[CATCH:%.*]] ]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[D]], [[A]]
; CHECK-NEXT: ret i1 [[CMP]]
;
entry:
- %a = getelementptr i8, i8* %p, i64 1
+ %a = getelementptr i8, ptr %p, i64 1
invoke void @may_throw()
to label %invoke.cont unwind label %catch.dispatch
invoke.cont:
- %b = getelementptr inbounds i8, i8* %a, i64 1
+ %b = getelementptr inbounds i8, ptr %a, i64 1
invoke void @may_throw()
to label %exit unwind label %catch.dispatch
catch.dispatch:
- %c = phi i8* [ %b, %invoke.cont ], [ %a, %entry ]
+ %c = phi ptr [ %b, %invoke.cont ], [ %a, %entry ]
%tmp1 = catchswitch within none [label %catch] unwind to caller
catch:
- %tmp2 = catchpad within %tmp1 [i8* null, i32 64, i8* null]
+ %tmp2 = catchpad within %tmp1 [ptr null, i32 64, ptr null]
catchret from %tmp2 to label %exit
exit:
- %d = phi i8* [ %a, %invoke.cont ], [ %c, %catch ]
- %cmp = icmp eq i8* %d, %a
+ %d = phi ptr [ %a, %invoke.cont ], [ %c, %catch ]
+ %cmp = icmp eq ptr %d, %a
ret i1 %cmp
}
;
entry:
%x.addr = alloca float, align 4
- store float %x, float* %x.addr, align 4
- %0 = load float, float* %x.addr, align 4
+ store float %x, ptr %x.addr, align 4
+ %0 = load float, ptr %x.addr, align 4
%1 = bitcast float %0 to i32
%shl = shl i32 %1, 1
%cmp = icmp ugt i32 %shl, -16777216
@i = constant i32 1, align 4
@f = constant float 0x3FF19999A0000000, align 4
@cmp = common global i32 0, align 4
-@resf = common global float* null, align 8
-@resi = common global i32* null, align 8
+@resf = common global ptr null, align 8
+@resi = common global ptr null, align 8
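; With opaque pointers @i and @f feed the phi directly; the CHECK-NOT below
; confirms that no pointer bitcasts survive in the output.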
define i32 @foo() {
entry:
br label %while.cond
while.cond:
- %res.0 = phi i32* [ null, %entry ], [ @i, %if.then ], [ bitcast (float* @f to i32*), %if.else ]
- %0 = load i32, i32* @cmp, align 4
+ %res.0 = phi ptr [ null, %entry ], [ @i, %if.then ], [ @f, %if.else ]
+ %0 = load i32, ptr @cmp, align 4
%shr = ashr i32 %0, 1
- store i32 %shr, i32* @cmp, align 4
+ store i32 %shr, ptr @cmp, align 4
%tobool = icmp ne i32 %shr, 0
br i1 %tobool, label %while.body, label %while.end
br label %while.cond
while.end:
- %1 = bitcast i32* %res.0 to float*
- store float* %1, float** @resf, align 8
- store i32* %res.0, i32** @resi, align 8
+ store ptr %res.0, ptr @resf, align 8
+ store ptr %res.0, ptr @resi, align 8
ret i32 0
; CHECK-NOT: bitcast i32
define void @foo() {
entry:
%0 = alloca i8
- %1 = bitcast i8* %0 to i4*
- call void @bar(i4* %1)
- %2 = bitcast i4* %1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %2, i8* @g, i32 1, i1 false)
- call void @gaz(i8* %2)
+ call void @bar(ptr %0)
+ call void @llvm.memcpy.p0.p0.i32(ptr %0, ptr @g, i32 1, i1 false)
+ call void @gaz(ptr %0)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1)
-declare void @bar(i4*)
-declare void @gaz(i8*)
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1)
+declare void @bar(ptr)
+declare void @gaz(ptr)
; The memcpy should be simplified to a single store of an i8, not i4
; CHECK: store i8 -1
define void @tinkywinky() {
; CHECK-LABEL: @tinkywinky(
-; CHECK-NEXT: [[PATATINO:%.*]] = load i8, i8* @a, align 1
+; CHECK-NEXT: [[PATATINO:%.*]] = load i8, ptr @a, align 1
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[PATATINO]], 0
; CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[TOBOOL]] to i32
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[TMP1]], or (i32 zext (i1 icmp ne (i32* bitcast (i8* @a to i32*), i32* @b) to i32), i32 2)
-; CHECK-NEXT: store i32 [[OR1]], i32* @b, align 4
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[TMP1]], or (i32 zext (i1 icmp ne (ptr @a, ptr @b) to i32), i32 2)
+; CHECK-NEXT: store i32 [[OR1]], ptr @b, align 4
; CHECK-NEXT: ret void
;
- %patatino = load i8, i8* @a
+ %patatino = load i8, ptr @a
%tobool = icmp ne i8 %patatino, 0
%lnot = xor i1 %tobool, true
%lnot.ext = zext i1 %lnot to i32
- %or = or i32 xor (i32 zext (i1 icmp ne (i32* bitcast (i8* @a to i32*), i32* @b) to i32), i32 2), %lnot.ext
- store i32 %or, i32* @b, align 4
+ %or = or i32 xor (i32 zext (i1 icmp ne (ptr @a, ptr @b) to i32), i32 2), %lnot.ext
+ store i32 %or, ptr @b, align 4
ret void
}
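; Both multiplicands are uitofp of an i1, so the product is known
; non-negative and the fabsf() call folds away (the fmul is returned
; directly).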
define float @patatino() {
; CHECK-LABEL: @patatino(
-; CHECK-NEXT: [[FMUL:%.*]] = fmul float uitofp (i1 icmp eq (i16* getelementptr inbounds (i16, i16* @g2, i64 1), i16* @g1) to float), uitofp (i1 icmp eq (i16* getelementptr inbounds (i16, i16* @g2, i64 1), i16* @g1) to float)
+; CHECK-NEXT: [[FMUL:%.*]] = fmul float uitofp (i1 icmp eq (ptr getelementptr inbounds (i16, ptr @g2, i64 1), ptr @g1) to float), uitofp (i1 icmp eq (ptr getelementptr inbounds (i16, ptr @g2, i64 1), ptr @g1) to float)
; CHECK-NEXT: ret float [[FMUL]]
;
- %fmul = fmul float uitofp (i1 icmp eq (i16* getelementptr inbounds (i16, i16* @g2, i64 1), i16* @g1) to float), uitofp (i1 icmp eq (i16* getelementptr inbounds (i16, i16* @g2, i64 1), i16* @g1) to float)
+ %fmul = fmul float uitofp (i1 icmp eq (ptr getelementptr inbounds (i16, ptr @g2, i64 1), ptr @g1) to float), uitofp (i1 icmp eq (ptr getelementptr inbounds (i16, ptr @g2, i64 1), ptr @g1) to float)
%call = call float @fabsf(float %fmul)
ret float %call
}
; CHECK-LABEL: @patatino(
; CHECK-NEXT: ret <2 x i16> zeroinitializer
;
- %tmp2 = getelementptr inbounds [1 x i16], [1 x i16]* null, i16 0, <2 x i16> undef
- %tmp3 = ptrtoint <2 x i16*> %tmp2 to <2 x i16>
+ %tmp2 = getelementptr inbounds [1 x i16], ptr null, i16 0, <2 x i16> undef
+ %tmp3 = ptrtoint <2 x ptr> %tmp2 to <2 x i16>
ret <2 x i16> %tmp3
}
; CHECK-LABEL: @func_24(
define i40 @func_24() {
entry:
- %bf.load81 = load i40, i40* bitcast ({ i8, i8, i8, i8, i8 }* @g_49 to i40*), align 2
+ %bf.load81 = load i40, ptr @g_49, align 2
%bf.clear = and i40 %bf.load81, -274869518337
- %bf.set = or i40 %bf.clear, shl (i40 zext (i1 icmp sgt (i32 zext (i1 icmp eq (i8* getelementptr inbounds ([6 x i8], [6 x i8]* @g_461, i64 0, i64 2), i8* @g_40) to i32), i32 0) to i40), i40 23)
+ %bf.set = or i40 %bf.clear, shl (i40 zext (i1 icmp sgt (i32 zext (i1 icmp eq (ptr getelementptr inbounds ([6 x i8], ptr @g_461, i64 0, i64 2), ptr @g_40) to i32), i32 0) to i40), i40 23)
%tmp = lshr i40 %bf.set, 23
%tmp1 = trunc i40 %tmp to i32
%tmp2 = and i32 1, %tmp1
; CHECK-NEXT: ret <4 x i1> <i1 true, i1 true, i1 true, i1 true>
;
entry:
- %0 = load i16, i16* getelementptr ([4 x i16], [4 x i16]* @offsets, i16 0, i16 0), align 1
+ %0 = load i16, ptr @offsets, align 1
%1 = insertelement <4 x i16> poison, i16 %0, i32 3
- %2 = getelementptr i32, i32* null, <4 x i16> %1
- %3 = getelementptr i32, i32* null, <4 x i16> %1
- %4 = icmp eq <4 x i32*> %2, %3
+ %2 = getelementptr i32, ptr null, <4 x i16> %1
+ %3 = getelementptr i32, ptr null, <4 x i16> %1
+ %4 = icmp eq <4 x ptr> %2, %3
ret <4 x i1> %4
}
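; With a non-null base on one side, the vector GEP comparison cannot be
; folded to a constant, so the geps and the icmp survive in the checks below.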
define <4 x i1> @PR38984_2() {
; CHECK-LABEL: @PR38984_2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* getelementptr inbounds ([4 x i16], [4 x i16]* @offsets, i16 0, i16 0), align 2
+; CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr @offsets, align 2
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 [[TMP0]], i64 3
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i16, i16* getelementptr inbounds ([21 x i16], [21 x i16]* @a, i16 1, i16 0), <4 x i16> [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i16, i16* null, <4 x i16> [[TMP1]]
-; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <4 x i16*> [[TMP2]], [[TMP3]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i16, ptr getelementptr inbounds ([21 x i16], ptr @a, i16 1, i16 0), <4 x i16> [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr null, <4 x i16> [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <4 x ptr> [[TMP2]], [[TMP3]]
; CHECK-NEXT: ret <4 x i1> [[TMP4]]
;
entry:
- %0 = load i16, i16* getelementptr ([4 x i16], [4 x i16]* @offsets, i16 0, i16 0)
+ %0 = load i16, ptr @offsets
%1 = insertelement <4 x i16> poison, i16 %0, i32 3
- %2 = getelementptr i16, i16* getelementptr ([21 x i16], [21 x i16]* @a, i64 1, i32 0), <4 x i16> %1
- %3 = getelementptr i16, i16* null, <4 x i16> %1
- %4 = icmp eq <4 x i16*> %2, %3
+ %2 = getelementptr i16, ptr getelementptr ([21 x i16], ptr @a, i64 1, i32 0), <4 x i16> %1
+ %3 = getelementptr i16, ptr null, <4 x i16> %1
+ %4 = icmp eq <4 x ptr> %2, %3
ret <4 x i1> %4
}
; CHECK-NEXT: ret <4 x i1> <i1 true, i1 true, i1 true, i1 true>
;
entry:
- %0 = load i16, i16* getelementptr ([4 x i16], [4 x i16]* @offsets, i16 0, i16 0), align 1
+ %0 = load i16, ptr @offsets, align 1
%1 = insertelement <4 x i16> undef, i16 %0, i32 3
- %2 = getelementptr i32, i32* null, <4 x i16> %1
- %3 = getelementptr i32, i32* null, <4 x i16> %1
- %4 = icmp eq <4 x i32*> %2, %3
+ %2 = getelementptr i32, ptr null, <4 x i16> %1
+ %3 = getelementptr i32, ptr null, <4 x i16> %1
+ %4 = icmp eq <4 x ptr> %2, %3
ret <4 x i1> %4
}
define <4 x i1> @PR38984_2() {
; CHECK-LABEL: @PR38984_2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* getelementptr inbounds ([4 x i16], [4 x i16]* @offsets, i16 0, i16 0), align 2
+; CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr @offsets, align 2
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i16> undef, i16 [[TMP0]], i64 3
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i16, i16* getelementptr inbounds ([21 x i16], [21 x i16]* @a, i16 1, i16 0), <4 x i16> [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i16, i16* null, <4 x i16> [[TMP1]]
-; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <4 x i16*> [[TMP2]], [[TMP3]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i16, ptr getelementptr inbounds ([21 x i16], ptr @a, i16 1, i16 0), <4 x i16> [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr null, <4 x i16> [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <4 x ptr> [[TMP2]], [[TMP3]]
; CHECK-NEXT: ret <4 x i1> [[TMP4]]
;
entry:
- %0 = load i16, i16* getelementptr ([4 x i16], [4 x i16]* @offsets, i16 0, i16 0)
+ %0 = load i16, ptr @offsets
%1 = insertelement <4 x i16> undef, i16 %0, i32 3
- %2 = getelementptr i16, i16* getelementptr ([21 x i16], [21 x i16]* @a, i64 1, i32 0), <4 x i16> %1
- %3 = getelementptr i16, i16* null, <4 x i16> %1
- %4 = icmp eq <4 x i16*> %2, %3
+ %2 = getelementptr i16, ptr getelementptr ([21 x i16], ptr @a, i64 1, i32 0), <4 x i16> %1
+ %3 = getelementptr i16, ptr null, <4 x i16> %1
+ %4 = icmp eq <4 x ptr> %2, %3
ret <4 x i1> %4
}
; Check that SimplifyLibCalls does not crash (or emit a library call) if the
; user has made a function alias with the same name.
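; (Informal note: with no variadic arguments and a format string free of
; conversions, the fprintf below would normally be simplified into a call to
; fwrite; since @fwrite resolves to the user's alias here, emitting that call
; could change behavior, so the simplification must be suppressed.)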
-%struct._IO_FILE = type { i32, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, %struct._IO_marker*, %struct._IO_FILE*, i32, i32, i64, i16, i8, [1 x i8], i8*, i64, i8*, i8*, i8*, i8*, i64, i32, [20 x i8] }
-%struct._IO_marker = type { %struct._IO_marker*, %struct._IO_FILE*, i32 }
+%struct._IO_FILE = type { i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32, i32, i64, i16, i8, [1 x i8], ptr, i64, ptr, ptr, ptr, ptr, i64, i32, [20 x i8] }
+%struct._IO_marker = type { ptr, ptr, i32 }
-@stderr = external global %struct._IO_FILE*, align 8
+@stderr = external global ptr, align 8
@.str = private constant [8 x i8] c"crash!\0A\00", align 1
-@fwrite = alias i64 (i8*, i64, i64, %struct._IO_FILE*), i64 (i8*, i64, i64, %struct._IO_FILE*)* @__fwrite_alias
+@fwrite = alias i64 (ptr, i64, i64, ptr), ptr @__fwrite_alias
-define i64 @__fwrite_alias(i8* %ptr, i64 %size, i64 %n, %struct._IO_FILE* %s) {
+define i64 @__fwrite_alias(ptr %ptr, i64 %size, i64 %n, ptr %s) {
; CHECK-LABEL: @__fwrite_alias(
; CHECK-NEXT: entry:
; CHECK-NEXT: ret i64 0
;
entry:
- %ptr.addr = alloca i8*, align 8
+ %ptr.addr = alloca ptr, align 8
%size.addr = alloca i64, align 8
%n.addr = alloca i64, align 8
- %s.addr = alloca %struct._IO_FILE*, align 8
- store i8* %ptr, i8** %ptr.addr, align 8
- store i64 %size, i64* %size.addr, align 8
- store i64 %n, i64* %n.addr, align 8
- store %struct._IO_FILE* %s, %struct._IO_FILE** %s.addr, align 8
+ %s.addr = alloca ptr, align 8
+ store ptr %ptr, ptr %ptr.addr, align 8
+ store i64 %size, ptr %size.addr, align 8
+ store i64 %n, ptr %n.addr, align 8
+ store ptr %s, ptr %s.addr, align 8
ret i64 0
}
;
entry:
%retval = alloca i32, align 4
- store i32 0, i32* %retval, align 4
- %0 = load %struct._IO_FILE*, %struct._IO_FILE** @stderr, align 8
- %call = call i32 (%struct._IO_FILE*, i8*, ...) @fprintf(%struct._IO_FILE* %0, i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i32 0, i32 0))
+ store i32 0, ptr %retval, align 4
+ %0 = load ptr, ptr @stderr, align 8
+ %call = call i32 (ptr, ptr, ...) @fprintf(ptr %0, ptr @.str)
ret void
}
-declare i32 @fprintf(%struct._IO_FILE*, i8*, ...)
+declare i32 @fprintf(ptr, ptr, ...)
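; Note: in the checks below the i128 multiply gains nuw. Both operands fit in
; 64 bits (one is a zext from i64, the other is a constant below 2^64), so the
; 128-bit product cannot wrap unsigned.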
define i64 @_Z8wyhash64v() {
; CHECK-LABEL: @_Z8wyhash64v(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, i64* @wyhash64_x, align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @wyhash64_x, align 8
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], 6971258582664805397
-; CHECK-NEXT: store i64 [[TMP2]], i64* @wyhash64_x, align 8
+; CHECK-NEXT: store i64 [[TMP2]], ptr @wyhash64_x, align 8
; CHECK-NEXT: [[TMP3:%.*]] = zext i64 [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i128 [[TMP3]], 11795372955171141389
; CHECK-NEXT: [[TMP5:%.*]] = lshr i128 [[TMP4]], 64
; CHECK-NEXT: [[TMP10:%.*]] = trunc i128 [[TMP9]] to i64
; CHECK-NEXT: ret i64 [[TMP10]]
;
- %1 = load i64, i64* @wyhash64_x, align 8
+ %1 = load i64, ptr @wyhash64_x, align 8
%2 = add i64 %1, 6971258582664805397
- store i64 %2, i64* @wyhash64_x, align 8
+ store i64 %2, ptr @wyhash64_x, align 8
%3 = zext i64 %2 to i128
%4 = mul i128 %3, 11795372955171141389
%5 = lshr i128 %4, 64
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-declare i8* @strchr(i8*, i32)
+declare ptr @strchr(ptr, i32)
-define i8* @pr43081(i8* %a) {
+define ptr @pr43081(ptr %a) {
entry:
- %a.addr = alloca i8*, align 8
- store i8* %a, i8** %a.addr, align 8
- %0 = load i8*, i8** %a.addr, align 8
- %call = call i8* @strchr(i8* %0, i32 0)
- ret i8* %call
-; CHECK: call i8* @strchr
+ %a.addr = alloca ptr, align 8
+ store ptr %a, ptr %a.addr, align 8
+ %0 = load ptr, ptr %a.addr, align 8
+ %call = call ptr @strchr(ptr %0, i32 0)
+ ret ptr %call
+; CHECK: call ptr @strchr
}
; simplified. But that depends on the worklist order, so that is not always
; guaranteed.
-define i16 @d(i16* %d.a, i16* %d.b) {
+define i16 @d(ptr %d.a, ptr %d.b) {
; CHECK-LABEL: @d(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[T0:%.*]] = load i16, i16* [[D_A:%.*]], align 1
+; CHECK-NEXT: [[T0:%.*]] = load i16, ptr [[D_A:%.*]], align 1
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i16 [[T0]], 0
; CHECK-NEXT: br i1 [[TOBOOL]], label [[LAND_END:%.*]], label [[LAND_RHS:%.*]]
; CHECK: land.rhs:
; CHECK-NEXT: ret i16 -1
;
entry:
- %t0 = load i16, i16* %d.a, align 1
+ %t0 = load i16, ptr %d.a, align 1
%tobool = icmp ne i16 %t0, 0
br i1 %tobool, label %land.rhs, label %land.end
land.rhs:
- %t1 = load i16, i16* %d.b, align 1
+ %t1 = load i16, ptr %d.b, align 1
%cmp = icmp ult i16 %t1, 0
br label %land.end
define dso_local i32 @main() !dbg !13 {
entry:
- %0 = load i8, i8* @a, align 1, !dbg !17
+ %0 = load i8, ptr @a, align 1, !dbg !17
%dec = add i8 %0, -1, !dbg !17
- store i8 %dec, i8* @a, align 1, !dbg !17
+ store i8 %dec, ptr @a, align 1, !dbg !17
;CHECK: call void @llvm.dbg.value(metadata i32 undef
;CHECK: call void @llvm.dbg.value(metadata i32 -8
;CHECK: call void @llvm.dbg.value(metadata i32 undef
call void @llvm.dbg.value(metadata i32 %udiv, metadata !18, metadata !DIExpression()), !dbg !19
call void @llvm.dbg.value(metadata i32 -8, metadata !20, metadata !DIExpression()), !dbg !19
call void @llvm.dbg.value(metadata i32 %udiv, metadata !20, metadata !DIExpression()), !dbg !19
- store i8 0, i8* @b, align 1, !dbg !21
+ store i8 0, ptr @b, align 1, !dbg !21
%cmp = icmp sgt i32 %conv, 0, !dbg !22
%conv1 = zext i1 %cmp to i32, !dbg !22
ret i32 0, !dbg !23
define i16 @main() {
; CHECK-LABEL: @main(
; CHECK-NEXT: entry:
-; CHECK-NEXT: store i64 0, i64* @csmith_sink_, align 8
+; CHECK-NEXT: store i64 0, ptr @csmith_sink_, align 8
; CHECK-NEXT: ret i16 0
;
entry:
- store i64 0, i64* @csmith_sink_, align 1
- %0 = load i16, i16* @g_313_0, align 1
+ store i64 0, ptr @csmith_sink_, align 1
+ %0 = load i16, ptr @g_313_0, align 1
%conv2 = sext i16 %0 to i64
- store i64 %conv2, i64* @csmith_sink_, align 1
- %1 = load i32, i32* @g_313_1, align 1
+ store i64 %conv2, ptr @csmith_sink_, align 1
+ %1 = load i32, ptr @g_313_1, align 1
%conv3 = zext i32 %1 to i64
- store i64 %conv3, i64* @csmith_sink_, align 1
- %2 = load i32, i32* @g_313_2, align 1
+ store i64 %conv3, ptr @csmith_sink_, align 1
+ %2 = load i32, ptr @g_313_2, align 1
%conv4 = sext i32 %2 to i64
- store i64 %conv4, i64* @csmith_sink_, align 1
- %3 = load i32, i32* @g_313_3, align 1
+ store i64 %conv4, ptr @csmith_sink_, align 1
+ %3 = load i32, ptr @g_313_3, align 1
%conv5 = zext i32 %3 to i64
- store i64 %conv5, i64* @csmith_sink_, align 1
- %4 = load i16, i16* @g_313_4, align 1
+ store i64 %conv5, ptr @csmith_sink_, align 1
+ %4 = load i16, ptr @g_313_4, align 1
%conv6 = sext i16 %4 to i64
- store i64 %conv6, i64* @csmith_sink_, align 1
- %5 = load i16, i16* @g_313_5, align 1
+ store i64 %conv6, ptr @csmith_sink_, align 1
+ %5 = load i16, ptr @g_313_5, align 1
%conv7 = sext i16 %5 to i64
- store i64 %conv7, i64* @csmith_sink_, align 1
- %6 = load i16, i16* @g_313_6, align 1
+ store i64 %conv7, ptr @csmith_sink_, align 1
+ %6 = load i16, ptr @g_313_6, align 1
%conv8 = sext i16 %6 to i64
- store i64 %conv8, i64* @csmith_sink_, align 1
- %7 = load i64, i64* getelementptr inbounds (%struct.S3, %struct.S3* @g_316, i32 0, i32 0), align 1
- store i64 %7, i64* @csmith_sink_, align 1
- %8 = load i16, i16* @g_316_1_0, align 1
+ store i64 %conv8, ptr @csmith_sink_, align 1
+ %7 = load i64, ptr @g_316, align 1
+ store i64 %7, ptr @csmith_sink_, align 1
+ %8 = load i16, ptr @g_316_1_0, align 1
%conv9 = sext i16 %8 to i64
- store i64 %conv9, i64* @csmith_sink_, align 1
- store i64 0, i64* @csmith_sink_, align 1
+ store i64 %conv9, ptr @csmith_sink_, align 1
+ store i64 0, ptr @csmith_sink_, align 1
ret i16 0
}
; This test used to cause an infinite loop in the load/store min/max bitcast
; transform.
-define void @test(i32* %p, i32* %p2) {
+define void @test(ptr %p, ptr %p2) {
; CHECK-LABEL: @test(
-; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[P:%.*]], align 4
-; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P2:%.*]], align 4
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P:%.*]], align 4
+; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[P2:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.umin.i32(i32 [[V2]], i32 [[V]])
-; CHECK-NEXT: store i32 [[TMP1]], i32* [[P]], align 4
+; CHECK-NEXT: store i32 [[TMP1]], ptr [[P]], align 4
; CHECK-NEXT: ret void
;
- %v = load i32, i32* %p, align 4
- %v2 = load i32, i32* %p2, align 4
+ %v = load i32, ptr %p, align 4
+ %v2 = load i32, ptr %p2, align 4
%cmp = icmp ult i32 %v2, %v
- %sel = select i1 %cmp, i32* %p2, i32* %p
- %p8 = bitcast i32* %p to i8*
- %sel8 = bitcast i32* %sel to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %p8, i8* align 4 %sel8, i64 4, i1 false)
+ %sel = select i1 %cmp, ptr %p2, ptr %p
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %p, ptr align 4 %sel, i64 4, i1 false)
ret void
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg) #0
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #0
attributes #0 = { argmemonly nounwind willreturn }
@d = dso_local local_unnamed_addr global i64 0, align 8
@c = external dso_local local_unnamed_addr global i8, align 1
-define void @test(i16* nocapture readonly %arg) local_unnamed_addr {
+define void @test(ptr nocapture readonly %arg) local_unnamed_addr {
; CHECK-LABEL: @test(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[I:%.*]] = load i64, i64* @d, align 8
+; CHECK-NEXT: [[I:%.*]] = load i64, ptr @d, align 8
; CHECK-NEXT: [[I1:%.*]] = icmp eq i64 [[I]], 0
-; CHECK-NEXT: [[I2:%.*]] = load i64, i64* @a, align 8
+; CHECK-NEXT: [[I2:%.*]] = load i64, ptr @a, align 8
; CHECK-NEXT: [[I3:%.*]] = icmp ne i64 [[I2]], 0
; CHECK-NEXT: br i1 [[I1]], label [[BB13:%.*]], label [[BB4:%.*]]
; CHECK: bb4:
-; CHECK-NEXT: [[I5:%.*]] = load i16, i16* [[ARG:%.*]], align 2
+; CHECK-NEXT: [[I5:%.*]] = load i16, ptr [[ARG:%.*]], align 2
; CHECK-NEXT: [[I6:%.*]] = trunc i16 [[I5]] to i8
-; CHECK-NEXT: store i8 [[I6]], i8* @c, align 1
+; CHECK-NEXT: store i8 [[I6]], ptr @c, align 1
; CHECK-NEXT: tail call void @llvm.assume(i1 [[I3]])
; CHECK-NEXT: br label [[BB22:%.*]]
; CHECK: bb13:
-; CHECK-NEXT: [[I14:%.*]] = load i16, i16* [[ARG]], align 2
+; CHECK-NEXT: [[I14:%.*]] = load i16, ptr [[ARG]], align 2
; CHECK-NEXT: [[I15:%.*]] = trunc i16 [[I14]] to i8
-; CHECK-NEXT: store i8 [[I15]], i8* @c, align 1
+; CHECK-NEXT: store i8 [[I15]], ptr @c, align 1
; CHECK-NEXT: br label [[BB22]]
; CHECK: bb22:
-; CHECK-NEXT: [[STOREMERGE2_IN:%.*]] = load i16, i16* [[ARG]], align 2
+; CHECK-NEXT: [[STOREMERGE2_IN:%.*]] = load i16, ptr [[ARG]], align 2
; CHECK-NEXT: [[STOREMERGE2:%.*]] = trunc i16 [[STOREMERGE2_IN]] to i8
-; CHECK-NEXT: store i8 [[STOREMERGE2]], i8* @c, align 1
-; CHECK-NEXT: [[STOREMERGE1_IN:%.*]] = load i16, i16* [[ARG]], align 2
+; CHECK-NEXT: store i8 [[STOREMERGE2]], ptr @c, align 1
+; CHECK-NEXT: [[STOREMERGE1_IN:%.*]] = load i16, ptr [[ARG]], align 2
; CHECK-NEXT: [[STOREMERGE1:%.*]] = trunc i16 [[STOREMERGE1_IN]] to i8
-; CHECK-NEXT: store i8 [[STOREMERGE1]], i8* @c, align 1
-; CHECK-NEXT: [[STOREMERGE_IN:%.*]] = load i16, i16* [[ARG]], align 2
+; CHECK-NEXT: store i8 [[STOREMERGE1]], ptr @c, align 1
+; CHECK-NEXT: [[STOREMERGE_IN:%.*]] = load i16, ptr [[ARG]], align 2
; CHECK-NEXT: [[STOREMERGE:%.*]] = trunc i16 [[STOREMERGE_IN]] to i8
-; CHECK-NEXT: store i8 [[STOREMERGE]], i8* @c, align 1
+; CHECK-NEXT: store i8 [[STOREMERGE]], ptr @c, align 1
; CHECK-NEXT: br label [[BB23:%.*]]
; CHECK: bb23:
; CHECK-NEXT: br label [[BB23]]
;
bb:
- %i = load i64, i64* @d, align 8
+ %i = load i64, ptr @d, align 8
%i1 = icmp eq i64 %i, 0
- %i2 = load i64, i64* @a, align 8
+ %i2 = load i64, ptr @a, align 8
%i3 = icmp ne i64 %i2, 0
br i1 %i1, label %bb13, label %bb4
bb4: ; preds = %bb
- %i5 = load i16, i16* %arg, align 2
+ %i5 = load i16, ptr %arg, align 2
%i6 = trunc i16 %i5 to i8
- store i8 %i6, i8* @c, align 1
+ store i8 %i6, ptr @c, align 1
tail call void @llvm.assume(i1 %i3)
- %i7 = load i16, i16* %arg, align 2
+ %i7 = load i16, ptr %arg, align 2
%i8 = trunc i16 %i7 to i8
- store i8 %i8, i8* @c, align 1
- %i9 = load i16, i16* %arg, align 2
+ store i8 %i8, ptr @c, align 1
+ %i9 = load i16, ptr %arg, align 2
%i10 = trunc i16 %i9 to i8
- store i8 %i10, i8* @c, align 1
- %i11 = load i16, i16* %arg, align 2
+ store i8 %i10, ptr @c, align 1
+ %i11 = load i16, ptr %arg, align 2
%i12 = trunc i16 %i11 to i8
- store i8 %i12, i8* @c, align 1
+ store i8 %i12, ptr @c, align 1
br label %bb22
bb13: ; preds = %bb
- %i14 = load i16, i16* %arg, align 2
+ %i14 = load i16, ptr %arg, align 2
%i15 = trunc i16 %i14 to i8
- store i8 %i15, i8* @c, align 1
- %i16 = load i16, i16* %arg, align 2
+ store i8 %i15, ptr @c, align 1
+ %i16 = load i16, ptr %arg, align 2
%i17 = trunc i16 %i16 to i8
- store i8 %i17, i8* @c, align 1
- %i18 = load i16, i16* %arg, align 2
+ store i8 %i17, ptr @c, align 1
+ %i18 = load i16, ptr %arg, align 2
%i19 = trunc i16 %i18 to i8
- store i8 %i19, i8* @c, align 1
- %i20 = load i16, i16* %arg, align 2
+ store i8 %i19, ptr @c, align 1
+ %i20 = load i16, ptr %arg, align 2
%i21 = trunc i16 %i20 to i8
- store i8 %i21, i8* @c, align 1
+ store i8 %i21, ptr @c, align 1
br label %bb22
bb22: ; preds = %bb13, %bb4
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
; OSS-Fuzz: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=38057
-define void @PR51824(<4 x i16> %idxs, i32* %ptr, i1 %c1, <4 x i32>* %ptr2) {
+define void @PR51824(<4 x i16> %idxs, ptr %ptr, i1 %c1, ptr %ptr2) {
; CHECK-LABEL: @PR51824(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[BB:%.*]]
%E9 = extractelement <4 x i16> zeroinitializer, i16 %B2
%I2 = insertelement <4 x i16> poison, i16 %E9, i16 0
%i = sext <4 x i16> %I2 to <4 x i32>
- %i1 = getelementptr inbounds i64, i64* null, <4 x i32> %i
- %i2 = ptrtoint <4 x i64*> %i1 to <4 x i32>
+ %i1 = getelementptr inbounds i64, ptr null, <4 x i32> %i
+ %i2 = ptrtoint <4 x ptr> %i1 to <4 x i32>
%E2 = extractelement <4 x i32> %i2, i16 0
br label %BB
BB: ; preds = %BB, %entry
%A15 = alloca <4 x i32>, align 16
- %L2 = load <4 x i32>, <4 x i32>* %A15, align 16
- %G1 = getelementptr i64, i64* null, i32 %E2
- %i3 = getelementptr inbounds i64, i64* %G1, <4 x i16> %idxs
- %i4 = ptrtoint <4 x i64*> %i3 to <4 x i32>
+ %L2 = load <4 x i32>, ptr %A15, align 16
+ %G1 = getelementptr i64, ptr null, i32 %E2
+ %i3 = getelementptr inbounds i64, ptr %G1, <4 x i16> %idxs
+ %i4 = ptrtoint <4 x ptr> %i3 to <4 x i32>
%E22 = extractelement <4 x i32> %L2, i1 false
%E8 = extractelement <4 x i32> %i4, i1 false
%I10 = insertelement <4 x i32> undef, i32 undef, i32 %E8
%S7 = shufflevector <4 x i32> %I19, <4 x i32> %L2, <4 x i32> poison
%I8 = insertelement <4 x i32> %I19, i32 0, i1 %C1
%E10 = extractelement <4 x i32> %I8, i1 poison
- store i32 %E10, i32* %ptr, align 4
+ store i32 %E10, ptr %ptr, align 4
br i1 %c1, label %BB, label %BB1
BB1: ; preds = %BB
%S8 = shufflevector <4 x i32> %I10, <4 x i32> %S7, <4 x i32> undef
- store <4 x i32> %S8, <4 x i32>* %ptr2, align 16
+ store <4 x i32> %S8, ptr %ptr2, align 16
ret void
}
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-%struct.C = type { %struct.C*, i32 }
+%struct.C = type { ptr, i32 }
; Check that we instcombine the load across the prefetch.
; CHECK-LABEL: define signext i32 @foo
-define signext i32 @foo(%struct.C* %c) local_unnamed_addr #0 {
-; CHECK: store i32 %dec, i32* %length_
+define signext i32 @foo(ptr %c) local_unnamed_addr #0 {
+; CHECK: store i32 %dec, ptr %length_
; CHECK-NOT: load
; CHECK: llvm.prefetch
; CHECK-NEXT: ret
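; The prefetch reads at most the pointed-to memory, so the reload of %length_
; after it can be forwarded from the store that precedes it.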
entry:
- %next_ = getelementptr inbounds %struct.C, %struct.C* %c, i32 0, i32 0
- %0 = load %struct.C*, %struct.C** %next_, align 8
- %next_1 = getelementptr inbounds %struct.C, %struct.C* %0, i32 0, i32 0
- %1 = load %struct.C*, %struct.C** %next_1, align 8
- store %struct.C* %1, %struct.C** %next_, align 8
- %length_ = getelementptr inbounds %struct.C, %struct.C* %c, i32 0, i32 1
- %2 = load i32, i32* %length_, align 8
+ %0 = load ptr, ptr %c, align 8
+ %1 = load ptr, ptr %0, align 8
+ store ptr %1, ptr %c, align 8
+ %length_ = getelementptr inbounds %struct.C, ptr %c, i32 0, i32 1
+ %2 = load i32, ptr %length_, align 8
%dec = add nsw i32 %2, -1
- store i32 %dec, i32* %length_, align 8
- %3 = bitcast %struct.C* %1 to i8*
- call void @llvm.prefetch(i8* %3, i32 0, i32 0, i32 1)
- %4 = load i32, i32* %length_, align 8
- ret i32 %4
+ store i32 %dec, ptr %length_, align 8
+ call void @llvm.prefetch(ptr %1, i32 0, i32 0, i32 1)
+ %3 = load i32, ptr %length_, align 8
+ ret i32 %3
}
; Function Attrs: inaccessiblemem_or_argmemonly nounwind
-declare void @llvm.prefetch(i8* nocapture readonly, i32, i32, i32)
+declare void @llvm.prefetch(ptr nocapture readonly, i32, i32, i32)
attributes #0 = { noinline nounwind }
; We've explicitly removed the function attrs from llvm.prefetch so we get the defaults.
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
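; Note: on this target a pointer is 64 bits, so ptrtoint to a narrower type
; truncates; to i1 that keeps only the low address bit, which the checks below
; show materialized as a mask-and-compare.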
-define i1 @test1(i32 *%x) nounwind {
+define i1 @test1(ptr %x) nounwind {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint i32* [[X:%.*]] to i64
+; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[X:%.*]] to i64
; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 1
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: ret i1 [[TMP2]]
;
entry:
- %0 = ptrtoint i32* %x to i1
+ %0 = ptrtoint ptr %x to i1
ret i1 %0
}
-define i32* @test2(i128 %x) nounwind {
+define ptr @test2(i128 %x) nounwind {
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = trunc i128 [[X:%.*]] to i64
-; CHECK-NEXT: [[TMP1:%.*]] = inttoptr i64 [[TMP0]] to i32*
-; CHECK-NEXT: ret i32* [[TMP1]]
+; CHECK-NEXT: [[TMP1:%.*]] = inttoptr i64 [[TMP0]] to ptr
+; CHECK-NEXT: ret ptr [[TMP1]]
;
entry:
- %0 = inttoptr i128 %x to i32*
- ret i32* %0
+ %0 = inttoptr i128 %x to ptr
+ ret ptr %0
}
; PR3574
; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[A0:%.*]] to i64
; CHECK-NEXT: ret i64 [[TMP1]]
;
- %t0 = inttoptr i32 %a0 to i8*
- %t1 = ptrtoint i8* %t0 to i64
+ %t0 = inttoptr i32 %a0 to ptr
+ %t1 = ptrtoint ptr %t0 to i64
ret i64 %t1
}
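; The vector tests below show the same canonicalization: pointer/int casts of
; the wrong width are split into a pointer-sized (i64) cast plus an integer
; trunc/zext, e.g. ptrtoint-to-i32 becomes ptrtoint-to-i64 followed by trunc.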
-define <4 x i32> @test4(<4 x i8*> %arg) nounwind {
+define <4 x i32> @test4(<4 x ptr> %arg) nounwind {
; CHECK-LABEL: @test4(
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint <4 x i8*> [[ARG:%.*]] to <4 x i64>
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint <4 x ptr> [[ARG:%.*]] to <4 x i64>
; CHECK-NEXT: [[P1:%.*]] = trunc <4 x i64> [[TMP1]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[P1]]
;
- %p1 = ptrtoint <4 x i8*> %arg to <4 x i32>
+ %p1 = ptrtoint <4 x ptr> %arg to <4 x i32>
ret <4 x i32> %p1
}
-define <vscale x 4 x i32> @testvscale4(<vscale x 4 x i8*> %arg) nounwind {
+define <vscale x 4 x i32> @testvscale4(<vscale x 4 x ptr> %arg) nounwind {
; CHECK-LABEL: @testvscale4(
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint <vscale x 4 x i8*> [[ARG:%.*]] to <vscale x 4 x i64>
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint <vscale x 4 x ptr> [[ARG:%.*]] to <vscale x 4 x i64>
; CHECK-NEXT: [[P1:%.*]] = trunc <vscale x 4 x i64> [[TMP1]] to <vscale x 4 x i32>
; CHECK-NEXT: ret <vscale x 4 x i32> [[P1]]
;
- %p1 = ptrtoint <vscale x 4 x i8*> %arg to <vscale x 4 x i32>
+ %p1 = ptrtoint <vscale x 4 x ptr> %arg to <vscale x 4 x i32>
ret <vscale x 4 x i32> %p1
}
-define <4 x i128> @test5(<4 x i8*> %arg) nounwind {
+define <4 x i128> @test5(<4 x ptr> %arg) nounwind {
; CHECK-LABEL: @test5(
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint <4 x i8*> [[ARG:%.*]] to <4 x i64>
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint <4 x ptr> [[ARG:%.*]] to <4 x i64>
; CHECK-NEXT: [[P1:%.*]] = zext <4 x i64> [[TMP1]] to <4 x i128>
; CHECK-NEXT: ret <4 x i128> [[P1]]
;
- %p1 = ptrtoint <4 x i8*> %arg to <4 x i128>
+ %p1 = ptrtoint <4 x ptr> %arg to <4 x i128>
ret <4 x i128> %p1
}
-define <4 x i8*> @test6(<4 x i32> %arg) nounwind {
+define <4 x ptr> @test6(<4 x i32> %arg) nounwind {
; CHECK-LABEL: @test6(
; CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i32> [[ARG:%.*]] to <4 x i64>
-; CHECK-NEXT: [[P1:%.*]] = inttoptr <4 x i64> [[TMP1]] to <4 x i8*>
-; CHECK-NEXT: ret <4 x i8*> [[P1]]
+; CHECK-NEXT: [[P1:%.*]] = inttoptr <4 x i64> [[TMP1]] to <4 x ptr>
+; CHECK-NEXT: ret <4 x ptr> [[P1]]
;
- %p1 = inttoptr <4 x i32> %arg to <4 x i8*>
- ret <4 x i8*> %p1
+ %p1 = inttoptr <4 x i32> %arg to <4 x ptr>
+ ret <4 x ptr> %p1
}
-define <4 x i8*> @test7(<4 x i128> %arg) nounwind {
+define <4 x ptr> @test7(<4 x i128> %arg) nounwind {
; CHECK-LABEL: @test7(
; CHECK-NEXT: [[TMP1:%.*]] = trunc <4 x i128> [[ARG:%.*]] to <4 x i64>
-; CHECK-NEXT: [[P1:%.*]] = inttoptr <4 x i64> [[TMP1]] to <4 x i8*>
-; CHECK-NEXT: ret <4 x i8*> [[P1]]
+; CHECK-NEXT: [[P1:%.*]] = inttoptr <4 x i64> [[TMP1]] to <4 x ptr>
+; CHECK-NEXT: ret <4 x ptr> [[P1]]
;
- %p1 = inttoptr <4 x i128> %arg to <4 x i8*>
- ret <4 x i8*> %p1
+ %p1 = inttoptr <4 x i128> %arg to <4 x ptr>
+ ret <4 x ptr> %p1
}
; icmp (inttoptr (ptrtoint p1)), p2 --> icmp p1, p2.
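; A minimal sketch of the fold (valid when the round-trip integer is at least
; pointer-sized and the address spaces match):
;   %i = ptrtoint ptr %p1 to i64
;   %p = inttoptr i64 %i to ptr
;   %cmp = icmp eq ptr %p, %p2
; -->
;   %cmp = icmp eq ptr %p1, %p2
; The negative tests further down show the cases where it must not fire.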
-define i1 @func(i8* %X, i8* %Y) {
+define i1 @func(ptr %X, ptr %Y) {
; CHECK-LABEL: @func(
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8* [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %i = ptrtoint i8* %X to i64
- %p = inttoptr i64 %i to i8*
- %cmp = icmp eq i8* %p, %Y
+ %i = ptrtoint ptr %X to i64
+ %p = inttoptr i64 %i to ptr
+ %cmp = icmp eq ptr %p, %Y
ret i1 %cmp
}
-define i1 @func_pointer_different_types(i16* %X, i8* %Y) {
+define i1 @func_pointer_different_types(ptr %X, ptr %Y) {
; CHECK-LABEL: @func_pointer_different_types(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i16* [[X:%.*]] to i8*
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8* [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %i = ptrtoint i16* %X to i64
- %p = inttoptr i64 %i to i8*
- %cmp = icmp eq i8* %p, %Y
+ %i = ptrtoint ptr %X to i64
+ %p = inttoptr i64 %i to ptr
+ %cmp = icmp eq ptr %p, %Y
ret i1 %cmp
}
-declare i8* @gen8ptr()
+declare ptr @gen8ptr()
-define i1 @func_commutative(i16* %X) {
+define i1 @func_commutative(ptr %X) {
; CHECK-LABEL: @func_commutative(
-; CHECK-NEXT: [[Y:%.*]] = call i8* @gen8ptr()
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i16* [[X:%.*]] to i8*
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8* [[Y]], [[TMP1]]
+; CHECK-NEXT: [[Y:%.*]] = call ptr @gen8ptr()
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[Y]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %Y = call i8* @gen8ptr() ; thwart complexity-based canonicalization
- %i = ptrtoint i16* %X to i64
- %p = inttoptr i64 %i to i8*
- %cmp = icmp eq i8* %Y, %p
+ %Y = call ptr @gen8ptr() ; thwart complexity-based canonicalization
+ %i = ptrtoint ptr %X to i64
+ %p = inttoptr i64 %i to ptr
+ %cmp = icmp eq ptr %Y, %p
ret i1 %cmp
}
; Negative test - wrong integer type.
-define i1 @func_integer_type_too_small(i16* %X, i8* %Y) {
+define i1 @func_integer_type_too_small(ptr %X, ptr %Y) {
; CHECK-LABEL: @func_integer_type_too_small(
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i16* [[X:%.*]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[X:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], 4294967295
-; CHECK-NEXT: [[P:%.*]] = inttoptr i64 [[TMP2]] to i8*
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8* [[P]], [[Y:%.*]]
+; CHECK-NEXT: [[P:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[P]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %i = ptrtoint i16* %X to i32
- %p = inttoptr i32 %i to i8*
- %cmp = icmp eq i8* %Y, %p
+ %i = ptrtoint ptr %X to i32
+ %p = inttoptr i32 %i to ptr
+ %cmp = icmp eq ptr %Y, %p
ret i1 %cmp
}
; Negative test - pointers in different address spaces
-define i1 @func_ptr_different_addrspace(i8* %X, i16 addrspace(3)* %Y){
+define i1 @func_ptr_different_addrspace(ptr %X, ptr addrspace(3) %Y){
; CHECK-LABEL: @func_ptr_different_addrspace(
-; CHECK-NEXT: [[I:%.*]] = ptrtoint i8* [[X:%.*]] to i64
-; CHECK-NEXT: [[P:%.*]] = inttoptr i64 [[I]] to i16 addrspace(3)*
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i16 addrspace(3)* [[P]], [[Y:%.*]]
+; CHECK-NEXT: [[I:%.*]] = ptrtoint ptr [[X:%.*]] to i64
+; CHECK-NEXT: [[P:%.*]] = inttoptr i64 [[I]] to ptr addrspace(3)
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr addrspace(3) [[P]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %i = ptrtoint i8* %X to i64
- %p = inttoptr i64 %i to i16 addrspace(3)*
- %cmp = icmp eq i16 addrspace(3)* %Y, %p
+ %i = ptrtoint ptr %X to i64
+ %p = inttoptr i64 %i to ptr addrspace(3)
+ %cmp = icmp eq ptr addrspace(3) %Y, %p
ret i1 %cmp
}
; Negative test - pointers in different address spaces
-define i1 @func_ptr_different_addrspace1(i8 addrspace(2)* %X, i16* %Y){
+define i1 @func_ptr_different_addrspace1(ptr addrspace(2) %X, ptr %Y){
; CHECK-LABEL: @func_ptr_different_addrspace1(
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i8 addrspace(2)* [[X:%.*]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(2) [[X:%.*]] to i32
; CHECK-NEXT: [[I:%.*]] = zext i32 [[TMP1]] to i64
-; CHECK-NEXT: [[P:%.*]] = inttoptr i64 [[I]] to i16*
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i16* [[P]], [[Y:%.*]]
+; CHECK-NEXT: [[P:%.*]] = inttoptr i64 [[I]] to ptr
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[P]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %i = ptrtoint i8 addrspace(2)* %X to i64
- %p = inttoptr i64 %i to i16*
- %cmp = icmp eq i16* %Y, %p
+ %i = ptrtoint ptr addrspace(2) %X to i64
+ %p = inttoptr i64 %i to ptr
+ %cmp = icmp eq ptr %Y, %p
ret i1 %cmp
}
; targets with 16-bit int.
declare i16 @putchar(i16)
-declare i16 @puts(i8*)
+declare i16 @puts(ptr)
@empty = constant [1 x i8] c"\00"
; CHECK-NEXT: ret void
;
; Transform puts("") to putchar('\n').
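; (puts writes its argument followed by a newline, so for the empty string
; only the newline remains and a single putchar suffices.)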
- %s = getelementptr [1 x i8], [1 x i8]* @empty, i32 0, i32 0
- call i16 @puts(i8* %s)
+ call i16 @puts(ptr @empty)
ret void
}
declare void @readnone_but_may_throw() readnone
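; The callee is readnone but not nounwind, so it may unwind: the store before
; the call is not dead even though it is overwritten on the fall-through path,
; because an unwinder could observe it.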
-define void @f_0(i32* %ptr) {
+define void @f_0(ptr %ptr) {
; CHECK-LABEL: @f_0(
entry:
-; CHECK: store i32 10, i32* %ptr
+; CHECK: store i32 10, ptr %ptr
; CHECK-NEXT: call void @readnone_but_may_throw()
-; CHECK-NEXT: store i32 20, i32* %ptr, align 4
+; CHECK-NEXT: store i32 20, ptr %ptr, align 4
; CHECK: ret void
- store i32 10, i32* %ptr
+ store i32 10, ptr %ptr
call void @readnone_but_may_throw()
- store i32 20, i32* %ptr
+ store i32 20, ptr %ptr
ret void
}
-define void @f_1(i1 %cond, i32* %ptr) {
+define void @f_1(i1 %cond, ptr %ptr) {
; CHECK-LABEL: @f_1(
-; CHECK: store i32 10, i32* %ptr
+; CHECK: store i32 10, ptr %ptr
; CHECK-NEXT: call void @readnone_but_may_throw()
- store i32 10, i32* %ptr
+ store i32 10, ptr %ptr
call void @readnone_but_may_throw()
br i1 %cond, label %left, label %merge
left:
- store i32 20, i32* %ptr
+ store i32 20, ptr %ptr
br label %merge
merge:
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes
; RUN: opt -S -passes=instcombine < %s | FileCheck %s
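; Freeing the result of a realloc disposes of the allocation, so the pair is
; folded into a single free of the incoming pointer (see the check below).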
-define dso_local void @_Z3fooPv(i8* nocapture %0) local_unnamed_addr #0 {
+define dso_local void @_Z3fooPv(ptr nocapture %0) local_unnamed_addr #0 {
; CHECK-LABEL: @_Z3fooPv(
-; CHECK-NEXT: tail call void @free(i8* [[TMP0:%.*]])
+; CHECK-NEXT: tail call void @free(ptr [[TMP0:%.*]])
; CHECK-NEXT: ret void
;
- %2 = tail call align 16 dereferenceable_or_null(6) i8* @realloc(i8* %0, i64 6) #3
- tail call void @free(i8* %2) #3
+ %2 = tail call align 16 dereferenceable_or_null(6) ptr @realloc(ptr %0, i64 6) #3
+ tail call void @free(ptr %2) #3
ret void
}
-declare dso_local noalias noundef i8* @realloc(i8* allocptr nocapture, i64 noundef) local_unnamed_addr #1
-declare dso_local void @free(i8* allocptr nocapture noundef) local_unnamed_addr #2
+declare dso_local noalias noundef ptr @realloc(ptr allocptr nocapture, i64 noundef) local_unnamed_addr #1
+declare dso_local void @free(ptr allocptr nocapture noundef) local_unnamed_addr #2
attributes #0 = { mustprogress nounwind uwtable willreturn }
attributes #1 = { inaccessiblemem_or_argmemonly mustprogress nounwind willreturn allockind("realloc") }
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-declare i8* @realloc(i8* allocptr, i64) allockind("realloc") allocsize(1)
-declare noalias i8* @malloc(i64) allockind("alloc,uninitialized")
+declare ptr @realloc(ptr allocptr, i64) allockind("realloc") allocsize(1)
+declare noalias ptr @malloc(i64) allockind("alloc,uninitialized")
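; C guarantees that realloc(NULL, n) behaves like malloc(n), so the call is
; rewritten when the pointer is known null; for an unknown pointer (second
; test) only the dereferenceable_or_null annotation is added.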
-define i8* @realloc_null_ptr() #0 {
+define ptr @realloc_null_ptr() #0 {
; CHECK-LABEL: @realloc_null_ptr(
-; CHECK-NEXT: [[MALLOC:%.*]] = call dereferenceable_or_null(100) i8* @malloc(i64 100)
-; CHECK-NEXT: ret i8* [[MALLOC]]
+; CHECK-NEXT: [[MALLOC:%.*]] = call dereferenceable_or_null(100) ptr @malloc(i64 100)
+; CHECK-NEXT: ret ptr [[MALLOC]]
;
- %call = call i8* @realloc(i8* null, i64 100) #2
- ret i8* %call
+ %call = call ptr @realloc(ptr null, i64 100) #2
+ ret ptr %call
}
-define i8* @realloc_unknown_ptr(i8* %ptr) #0 {
+define ptr @realloc_unknown_ptr(ptr %ptr) #0 {
; CHECK-LABEL: @realloc_unknown_ptr(
-; CHECK-NEXT: [[CALL:%.*]] = call dereferenceable_or_null(100) i8* @realloc(i8* [[PTR:%.*]], i64 100)
-; CHECK-NEXT: ret i8* [[CALL]]
+; CHECK-NEXT: [[CALL:%.*]] = call dereferenceable_or_null(100) ptr @realloc(ptr [[PTR:%.*]], i64 100)
+; CHECK-NEXT: ret ptr [[CALL]]
;
- %call = call i8* @realloc(i8* %ptr, i64 100) #2
- ret i8* %call
+ %call = call ptr @realloc(ptr %ptr, i64 100) #2
+ ret ptr %call
}
br label %loop
}
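; The or/and recurrences below reload a volatile step on every iteration, so
; the IV cannot be simplified; the checks pin the loop shape to make sure no
; invalid fold fires.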
-define i64 @test_or4(i64 %a, i64* %p) {
+define i64 @test_or4(i64 %a, ptr %p) {
; CHECK-LABEL: @test_or4(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[A:%.*]], [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[STEP:%.*]] = load volatile i64, i64* [[P:%.*]], align 4
+; CHECK-NEXT: [[STEP:%.*]] = load volatile i64, ptr [[P:%.*]], align 4
; CHECK-NEXT: [[IV_NEXT]] = or i64 [[IV]], [[STEP]]
; CHECK-NEXT: tail call void @use(i64 [[IV_NEXT]])
; CHECK-NEXT: br label [[LOOP]]
loop: ; preds = %loop, %entry
%iv = phi i64 [ %a, %entry ], [ %iv.next, %loop ]
- %step = load volatile i64, i64* %p
+ %step = load volatile i64, ptr %p
%iv.next = or i64 %iv, %step
tail call void @use(i64 %iv.next)
br label %loop
}
-define i64 @test_and4(i64 %a, i64* %p) {
+define i64 @test_and4(i64 %a, ptr %p) {
; CHECK-LABEL: @test_and4(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[A:%.*]], [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[STEP:%.*]] = load volatile i64, i64* [[P:%.*]], align 4
+; CHECK-NEXT: [[STEP:%.*]] = load volatile i64, ptr [[P:%.*]], align 4
; CHECK-NEXT: [[IV_NEXT]] = and i64 [[IV]], [[STEP]]
; CHECK-NEXT: tail call void @use(i64 [[IV_NEXT]])
; CHECK-NEXT: br label [[LOOP]]
loop: ; preds = %loop, %entry
%iv = phi i64 [ %a, %entry ], [ %iv.next, %loop ]
- %step = load volatile i64, i64* %p
+ %step = load volatile i64, ptr %p
%iv.next = and i64 %iv, %step
tail call void @use(i64 %iv.next)
br label %loop
; CHECK-NEXT: [[TMP4:%.*]] = sub i8 0, [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <128 x i1> [[X]], i64 0
; CHECK-NEXT: [[EXT:%.*]] = sext i1 [[TMP5]] to i8
-; CHECK-NEXT: store i8 [[EXT]], i8* @glob, align 1
+; CHECK-NEXT: store i8 [[EXT]], ptr @glob, align 1
; CHECK-NEXT: ret i8 [[TMP4]]
;
%sext = sext <128 x i1> %x to <128 x i8>
%res = call i8 @llvm.vector.reduce.add.v128i8(<128 x i8> %sext)
%ext = extractelement <128 x i8> %sext, i32 0
- store i8 %ext, i8* @glob, align 1
+ store i8 %ext, ptr @glob, align 1
ret i8 %res
}
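; For an i1 vector, reduce.add of its sign-extension is minus the popcount of
; the mask and reduce.add of its zero-extension is the popcount itself; the
; checks around these tests show the bitcast+ctpop expansion.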
; CHECK-NEXT: [[TMP3:%.*]] = zext i8 [[TMP2]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <8 x i1> [[X]], i64 0
; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[TMP4]] to i64
-; CHECK-NEXT: store i64 [[EXT]], i64* @glob1, align 8
+; CHECK-NEXT: store i64 [[EXT]], ptr @glob1, align 8
; CHECK-NEXT: ret i64 [[TMP3]]
;
%zext = zext <8 x i1> %x to <8 x i64>
%res = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %zext)
%ext = extractelement <8 x i64> %zext, i32 0
- store i64 %ext, i64* @glob1, align 8
+ store i64 %ext, ptr @glob1, align 8
ret i64 %res
}
; CHECK-NEXT: [[TMP3:%.*]] = sext i1 [[TMP2]] to i8
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <128 x i1> [[X]], i64 0
; CHECK-NEXT: [[EXT:%.*]] = sext i1 [[TMP4]] to i8
-; CHECK-NEXT: store i8 [[EXT]], i8* @glob, align 1
+; CHECK-NEXT: store i8 [[EXT]], ptr @glob, align 1
; CHECK-NEXT: ret i8 [[TMP3]]
;
%sext = sext <128 x i1> %x to <128 x i8>
%res = call i8 @llvm.vector.reduce.and.v128i8(<128 x i8> %sext)
%ext = extractelement <128 x i8> %sext, i32 0
- store i8 %ext, i8* @glob, align 1
+ store i8 %ext, ptr @glob, align 1
ret i8 %res
}
; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP2]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <8 x i1> [[X]], i64 0
; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[TMP4]] to i64
-; CHECK-NEXT: store i64 [[EXT]], i64* @glob1, align 8
+; CHECK-NEXT: store i64 [[EXT]], ptr @glob1, align 8
; CHECK-NEXT: ret i64 [[TMP3]]
;
%zext = zext <8 x i1> %x to <8 x i64>
%res = call i64 @llvm.vector.reduce.and.v8i64(<8 x i64> %zext)
%ext = extractelement <8 x i64> %zext, i32 0
- store i64 %ext, i64* @glob1, align 8
+ store i64 %ext, ptr @glob1, align 8
ret i64 %res
}
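; An all-lanes equality reduction over <8 x i8> loads is rewritten as a single
; i64 load-and-compare; in the _wide variants the <8 x i1> compare mask is
; instead bitcast to i8 and tested against zero.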
-define i1 @reduce_and_pointer_cast(i8* %arg, i8* %arg1) {
+define i1 @reduce_and_pointer_cast(ptr %arg, ptr %arg1) {
; CHECK-LABEL: @reduce_and_pointer_cast(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[ARG1:%.*]] to i64*
-; CHECK-NEXT: [[LHS1:%.*]] = load i64, i64* [[TMP0]], align 8
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[ARG:%.*]] to i64*
-; CHECK-NEXT: [[RHS2:%.*]] = load i64, i64* [[TMP1]], align 8
+; CHECK-NEXT: [[LHS1:%.*]] = load i64, ptr [[ARG1:%.*]], align 8
+; CHECK-NEXT: [[RHS2:%.*]] = load i64, ptr [[ARG:%.*]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[LHS1]], [[RHS2]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
bb:
- %ptr1 = bitcast i8* %arg1 to <8 x i8>*
- %ptr2 = bitcast i8* %arg to <8 x i8>*
- %lhs = load <8 x i8>, <8 x i8>* %ptr1
- %rhs = load <8 x i8>, <8 x i8>* %ptr2
+ %lhs = load <8 x i8>, ptr %arg1
+ %rhs = load <8 x i8>, ptr %arg
%cmp = icmp eq <8 x i8> %lhs, %rhs
%all_eq = call i1 @llvm.vector.reduce.and.v8i32(<8 x i1> %cmp)
ret i1 %all_eq
}
-define i1 @reduce_and_pointer_cast_wide(i8* %arg, i8* %arg1) {
+define i1 @reduce_and_pointer_cast_wide(ptr %arg, ptr %arg1) {
; CHECK-LABEL: @reduce_and_pointer_cast_wide(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[PTR1:%.*]] = bitcast i8* [[ARG1:%.*]] to <8 x i16>*
-; CHECK-NEXT: [[PTR2:%.*]] = bitcast i8* [[ARG:%.*]] to <8 x i16>*
-; CHECK-NEXT: [[LHS:%.*]] = load <8 x i16>, <8 x i16>* [[PTR1]], align 16
-; CHECK-NEXT: [[RHS:%.*]] = load <8 x i16>, <8 x i16>* [[PTR2]], align 16
+; CHECK-NEXT: [[LHS:%.*]] = load <8 x i16>, ptr [[ARG1:%.*]], align 16
+; CHECK-NEXT: [[RHS:%.*]] = load <8 x i16>, ptr [[ARG:%.*]], align 16
; CHECK-NEXT: [[CMP:%.*]] = icmp ne <8 x i16> [[LHS]], [[RHS]]
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i1> [[CMP]] to i8
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 [[TMP0]], 0
; CHECK-NEXT: ret i1 [[TMP1]]
;
bb:
- %ptr1 = bitcast i8* %arg1 to <8 x i16>*
- %ptr2 = bitcast i8* %arg to <8 x i16>*
- %lhs = load <8 x i16>, <8 x i16>* %ptr1
- %rhs = load <8 x i16>, <8 x i16>* %ptr2
+ %lhs = load <8 x i16>, ptr %arg1
+ %rhs = load <8 x i16>, ptr %arg
%cmp = icmp eq <8 x i16> %lhs, %rhs
%all_eq = call i1 @llvm.vector.reduce.and.v8i32(<8 x i1> %cmp)
ret i1 %all_eq
}
-define i1 @reduce_and_pointer_cast_ne(i8* %arg, i8* %arg1) {
+define i1 @reduce_and_pointer_cast_ne(ptr %arg, ptr %arg1) {
; CHECK-LABEL: @reduce_and_pointer_cast_ne(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[ARG1:%.*]] to i64*
-; CHECK-NEXT: [[LHS1:%.*]] = load i64, i64* [[TMP0]], align 8
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[ARG:%.*]] to i64*
-; CHECK-NEXT: [[RHS2:%.*]] = load i64, i64* [[TMP1]], align 8
+; CHECK-NEXT: [[LHS1:%.*]] = load i64, ptr [[ARG1:%.*]], align 8
+; CHECK-NEXT: [[RHS2:%.*]] = load i64, ptr [[ARG:%.*]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[LHS1]], [[RHS2]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
bb:
- %ptr1 = bitcast i8* %arg1 to <8 x i8>*
- %ptr2 = bitcast i8* %arg to <8 x i8>*
- %lhs = load <8 x i8>, <8 x i8>* %ptr1
- %rhs = load <8 x i8>, <8 x i8>* %ptr2
+ %lhs = load <8 x i8>, ptr %arg1
+ %rhs = load <8 x i8>, ptr %arg
%cmp = icmp eq <8 x i8> %lhs, %rhs
%all_eq = call i1 @llvm.vector.reduce.and.v8i32(<8 x i1> %cmp)
%any_ne = xor i1 %all_eq, 1
ret i1 %any_ne
}
-define i1 @reduce_and_pointer_cast_ne_wide(i8* %arg, i8* %arg1) {
+define i1 @reduce_and_pointer_cast_ne_wide(ptr %arg, ptr %arg1) {
; CHECK-LABEL: @reduce_and_pointer_cast_ne_wide(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[PTR1:%.*]] = bitcast i8* [[ARG1:%.*]] to <8 x i16>*
-; CHECK-NEXT: [[PTR2:%.*]] = bitcast i8* [[ARG:%.*]] to <8 x i16>*
-; CHECK-NEXT: [[LHS:%.*]] = load <8 x i16>, <8 x i16>* [[PTR1]], align 16
-; CHECK-NEXT: [[RHS:%.*]] = load <8 x i16>, <8 x i16>* [[PTR2]], align 16
+; CHECK-NEXT: [[LHS:%.*]] = load <8 x i16>, ptr [[ARG1:%.*]], align 16
+; CHECK-NEXT: [[RHS:%.*]] = load <8 x i16>, ptr [[ARG:%.*]], align 16
; CHECK-NEXT: [[CMP:%.*]] = icmp ne <8 x i16> [[LHS]], [[RHS]]
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i1> [[CMP]] to i8
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i8 [[TMP0]], 0
; CHECK-NEXT: ret i1 [[TMP1]]
;
bb:
- %ptr1 = bitcast i8* %arg1 to <8 x i16>*
- %ptr2 = bitcast i8* %arg to <8 x i16>*
- %lhs = load <8 x i16>, <8 x i16>* %ptr1
- %rhs = load <8 x i16>, <8 x i16>* %ptr2
+ %lhs = load <8 x i16>, ptr %arg1
+ %rhs = load <8 x i16>, ptr %arg
%cmp = icmp eq <8 x i16> %lhs, %rhs
%all_eq = call i1 @llvm.vector.reduce.and.v8i32(<8 x i1> %cmp)
%any_ne = xor i1 %all_eq, 1
; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP2]] to i8
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <128 x i1> [[X]], i64 0
; CHECK-NEXT: [[EXT:%.*]] = sext i1 [[TMP4]] to i8
-; CHECK-NEXT: store i8 [[EXT]], i8* @glob, align 1
+; CHECK-NEXT: store i8 [[EXT]], ptr @glob, align 1
; CHECK-NEXT: ret i8 [[TMP3]]
;
%sext = sext <128 x i1> %x to <128 x i8>
%res = call i8 @llvm.vector.reduce.mul.v128i8(<128 x i8> %sext)
%ext = extractelement <128 x i8> %sext, i32 0
- store i8 %ext, i8* @glob, align 1
+ store i8 %ext, ptr @glob, align 1
ret i8 %res
}
; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP2]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <8 x i1> [[X]], i64 0
; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[TMP4]] to i64
-; CHECK-NEXT: store i64 [[EXT]], i64* @glob1, align 8
+; CHECK-NEXT: store i64 [[EXT]], ptr @glob1, align 8
; CHECK-NEXT: ret i64 [[TMP3]]
;
%zext = zext <8 x i1> %x to <8 x i64>
%res = call i64 @llvm.vector.reduce.mul.v8i64(<8 x i64> %zext)
%ext = extractelement <8 x i64> %zext, i32 0
- store i64 %ext, i64* @glob1, align 8
+ store i64 %ext, ptr @glob1, align 8
ret i64 %res
}
; CHECK-NEXT: [[TMP3:%.*]] = sext i1 [[TMP2]] to i8
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <128 x i1> [[X]], i64 0
; CHECK-NEXT: [[EXT:%.*]] = sext i1 [[TMP4]] to i8
-; CHECK-NEXT: store i8 [[EXT]], i8* @glob, align 1
+; CHECK-NEXT: store i8 [[EXT]], ptr @glob, align 1
; CHECK-NEXT: ret i8 [[TMP3]]
;
%sext = sext <128 x i1> %x to <128 x i8>
%res = call i8 @llvm.vector.reduce.or.v128i8(<128 x i8> %sext)
%ext = extractelement <128 x i8> %sext, i32 0
- store i8 %ext, i8* @glob, align 1
+ store i8 %ext, ptr @glob, align 1
ret i8 %res
}
; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP2]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <8 x i1> [[X]], i64 0
; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[TMP4]] to i64
-; CHECK-NEXT: store i64 [[EXT]], i64* @glob1, align 8
+; CHECK-NEXT: store i64 [[EXT]], ptr @glob1, align 8
; CHECK-NEXT: ret i64 [[TMP3]]
;
%zext = zext <8 x i1> %x to <8 x i64>
%res = call i64 @llvm.vector.reduce.or.v8i64(<8 x i64> %zext)
%ext = extractelement <8 x i64> %zext, i32 0
- store i64 %ext, i64* @glob1, align 8
+ store i64 %ext, ptr @glob1, align 8
ret i64 %res
}
-define i1 @reduce_or_pointer_cast(i8* %arg, i8* %arg1) {
+define i1 @reduce_or_pointer_cast(ptr %arg, ptr %arg1) {
; CHECK-LABEL: @reduce_or_pointer_cast(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[ARG1:%.*]] to i64*
-; CHECK-NEXT: [[LHS1:%.*]] = load i64, i64* [[TMP0]], align 8
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[ARG:%.*]] to i64*
-; CHECK-NEXT: [[RHS2:%.*]] = load i64, i64* [[TMP1]], align 8
+; CHECK-NEXT: [[LHS1:%.*]] = load i64, ptr [[ARG1:%.*]], align 8
+; CHECK-NEXT: [[RHS2:%.*]] = load i64, ptr [[ARG:%.*]], align 8
; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[LHS1]], [[RHS2]]
; CHECK-NEXT: ret i1 [[DOTNOT]]
;
bb:
- %ptr1 = bitcast i8* %arg1 to <8 x i8>*
- %ptr2 = bitcast i8* %arg to <8 x i8>*
- %lhs = load <8 x i8>, <8 x i8>* %ptr1
- %rhs = load <8 x i8>, <8 x i8>* %ptr2
+ %lhs = load <8 x i8>, ptr %arg1
+ %rhs = load <8 x i8>, ptr %arg
%cmp = icmp ne <8 x i8> %lhs, %rhs
%any_ne = call i1 @llvm.vector.reduce.or.v8i32(<8 x i1> %cmp)
%all_eq = xor i1 %any_ne, 1
ret i1 %all_eq
}
-define i1 @reduce_or_pointer_cast_wide(i8* %arg, i8* %arg1) {
+define i1 @reduce_or_pointer_cast_wide(ptr %arg, ptr %arg1) {
; CHECK-LABEL: @reduce_or_pointer_cast_wide(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[PTR1:%.*]] = bitcast i8* [[ARG1:%.*]] to <8 x i16>*
-; CHECK-NEXT: [[PTR2:%.*]] = bitcast i8* [[ARG:%.*]] to <8 x i16>*
-; CHECK-NEXT: [[LHS:%.*]] = load <8 x i16>, <8 x i16>* [[PTR1]], align 16
-; CHECK-NEXT: [[RHS:%.*]] = load <8 x i16>, <8 x i16>* [[PTR2]], align 16
+; CHECK-NEXT: [[LHS:%.*]] = load <8 x i16>, ptr [[ARG1:%.*]], align 16
+; CHECK-NEXT: [[RHS:%.*]] = load <8 x i16>, ptr [[ARG:%.*]], align 16
; CHECK-NEXT: [[CMP:%.*]] = icmp ne <8 x i16> [[LHS]], [[RHS]]
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i1> [[CMP]] to i8
; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP0]], 0
; CHECK-NEXT: ret i1 [[DOTNOT]]
;
bb:
- %ptr1 = bitcast i8* %arg1 to <8 x i16>*
- %ptr2 = bitcast i8* %arg to <8 x i16>*
- %lhs = load <8 x i16>, <8 x i16>* %ptr1
- %rhs = load <8 x i16>, <8 x i16>* %ptr2
+ %lhs = load <8 x i16>, ptr %arg1
+ %rhs = load <8 x i16>, ptr %arg
%cmp = icmp ne <8 x i16> %lhs, %rhs
%any_ne = call i1 @llvm.vector.reduce.or.v8i32(<8 x i1> %cmp)
%all_eq = xor i1 %any_ne, 1
}
-define i1 @reduce_or_pointer_cast_ne(i8* %arg, i8* %arg1) {
+define i1 @reduce_or_pointer_cast_ne(ptr %arg, ptr %arg1) {
; CHECK-LABEL: @reduce_or_pointer_cast_ne(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[ARG1:%.*]] to i64*
-; CHECK-NEXT: [[LHS1:%.*]] = load i64, i64* [[TMP0]], align 8
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[ARG:%.*]] to i64*
-; CHECK-NEXT: [[RHS2:%.*]] = load i64, i64* [[TMP1]], align 8
+; CHECK-NEXT: [[LHS1:%.*]] = load i64, ptr [[ARG1:%.*]], align 8
+; CHECK-NEXT: [[RHS2:%.*]] = load i64, ptr [[ARG:%.*]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[LHS1]], [[RHS2]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
bb:
- %ptr1 = bitcast i8* %arg1 to <8 x i8>*
- %ptr2 = bitcast i8* %arg to <8 x i8>*
- %lhs = load <8 x i8>, <8 x i8>* %ptr1
- %rhs = load <8 x i8>, <8 x i8>* %ptr2
+ %lhs = load <8 x i8>, ptr %arg1
+ %rhs = load <8 x i8>, ptr %arg
%cmp = icmp ne <8 x i8> %lhs, %rhs
%any_ne = call i1 @llvm.vector.reduce.or.v8i32(<8 x i1> %cmp)
ret i1 %any_ne
}
-define i1 @reduce_or_pointer_cast_ne_wide(i8* %arg, i8* %arg1) {
+define i1 @reduce_or_pointer_cast_ne_wide(ptr %arg, ptr %arg1) {
; CHECK-LABEL: @reduce_or_pointer_cast_ne_wide(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[PTR1:%.*]] = bitcast i8* [[ARG1:%.*]] to <8 x i16>*
-; CHECK-NEXT: [[PTR2:%.*]] = bitcast i8* [[ARG:%.*]] to <8 x i16>*
-; CHECK-NEXT: [[LHS:%.*]] = load <8 x i16>, <8 x i16>* [[PTR1]], align 16
-; CHECK-NEXT: [[RHS:%.*]] = load <8 x i16>, <8 x i16>* [[PTR2]], align 16
+; CHECK-NEXT: [[LHS:%.*]] = load <8 x i16>, ptr [[ARG1:%.*]], align 16
+; CHECK-NEXT: [[RHS:%.*]] = load <8 x i16>, ptr [[ARG:%.*]], align 16
; CHECK-NEXT: [[CMP:%.*]] = icmp ne <8 x i16> [[LHS]], [[RHS]]
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i1> [[CMP]] to i8
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i8 [[TMP0]], 0
; CHECK-NEXT: ret i1 [[TMP1]]
;
bb:
- %ptr1 = bitcast i8* %arg1 to <8 x i16>*
- %ptr2 = bitcast i8* %arg to <8 x i16>*
- %lhs = load <8 x i16>, <8 x i16>* %ptr1
- %rhs = load <8 x i16>, <8 x i16>* %ptr2
+ %lhs = load <8 x i16>, ptr %arg1
+ %rhs = load <8 x i16>, ptr %arg
%cmp = icmp ne <8 x i16> %lhs, %rhs
%any_ne = call i1 @llvm.vector.reduce.or.v8i32(<8 x i1> %cmp)
ret i1 %any_ne
; CHECK-NEXT: [[TMP3:%.*]] = sext i1 [[TMP2]] to i8
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <128 x i1> [[X]], i64 0
; CHECK-NEXT: [[EXT:%.*]] = sext i1 [[TMP4]] to i8
-; CHECK-NEXT: store i8 [[EXT]], i8* @glob, align 1
+; CHECK-NEXT: store i8 [[EXT]], ptr @glob, align 1
; CHECK-NEXT: ret i8 [[TMP3]]
;
%sext = sext <128 x i1> %x to <128 x i8>
%res = call i8 @llvm.vector.reduce.smax.v128i8(<128 x i8> %sext)
%ext = extractelement <128 x i8> %sext, i32 0
- store i8 %ext, i8* @glob, align 1
+ store i8 %ext, ptr @glob, align 1
ret i8 %res
}
; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP2]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <8 x i1> [[X]], i64 0
; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[TMP4]] to i64
-; CHECK-NEXT: store i64 [[EXT]], i64* @glob1, align 8
+; CHECK-NEXT: store i64 [[EXT]], ptr @glob1, align 8
; CHECK-NEXT: ret i64 [[TMP3]]
;
%zext = zext <8 x i1> %x to <8 x i64>
%res = call i64 @llvm.vector.reduce.smax.v8i64(<8 x i64> %zext)
%ext = extractelement <8 x i64> %zext, i32 0
- store i64 %ext, i64* @glob1, align 8
+ store i64 %ext, ptr @glob1, align 8
ret i64 %res
}
; CHECK-NEXT: [[TMP3:%.*]] = sext i1 [[TMP2]] to i8
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <128 x i1> [[X]], i64 0
; CHECK-NEXT: [[EXT:%.*]] = sext i1 [[TMP4]] to i8
-; CHECK-NEXT: store i8 [[EXT]], i8* @glob, align 1
+; CHECK-NEXT: store i8 [[EXT]], ptr @glob, align 1
; CHECK-NEXT: ret i8 [[TMP3]]
;
%sext = sext <128 x i1> %x to <128 x i8>
%res = call i8 @llvm.vector.reduce.smin.v128i8(<128 x i8> %sext)
%ext = extractelement <128 x i8> %sext, i32 0
- store i8 %ext, i8* @glob, align 1
+ store i8 %ext, ptr @glob, align 1
ret i8 %res
}
; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP2]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <8 x i1> [[X]], i64 0
; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[TMP4]] to i64
-; CHECK-NEXT: store i64 [[EXT]], i64* @glob1, align 8
+; CHECK-NEXT: store i64 [[EXT]], ptr @glob1, align 8
; CHECK-NEXT: ret i64 [[TMP3]]
;
%zext = zext <8 x i1> %x to <8 x i64>
%res = call i64 @llvm.vector.reduce.smin.v8i64(<8 x i64> %zext)
%ext = extractelement <8 x i64> %zext, i32 0
- store i64 %ext, i64* @glob1, align 8
+ store i64 %ext, ptr @glob1, align 8
ret i64 %res
}
; CHECK-NEXT: [[TMP3:%.*]] = sext i1 [[TMP2]] to i8
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <128 x i1> [[X]], i64 0
; CHECK-NEXT: [[EXT:%.*]] = sext i1 [[TMP4]] to i8
-; CHECK-NEXT: store i8 [[EXT]], i8* @glob, align 1
+; CHECK-NEXT: store i8 [[EXT]], ptr @glob, align 1
; CHECK-NEXT: ret i8 [[TMP3]]
;
%sext = sext <128 x i1> %x to <128 x i8>
%res = call i8 @llvm.vector.reduce.umax.v128i8(<128 x i8> %sext)
%ext = extractelement <128 x i8> %sext, i32 0
- store i8 %ext, i8* @glob, align 1
+ store i8 %ext, ptr @glob, align 1
ret i8 %res
}
; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP2]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <8 x i1> [[X]], i64 0
; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[TMP4]] to i64
-; CHECK-NEXT: store i64 [[EXT]], i64* @glob1, align 8
+; CHECK-NEXT: store i64 [[EXT]], ptr @glob1, align 8
; CHECK-NEXT: ret i64 [[TMP3]]
;
%zext = zext <8 x i1> %x to <8 x i64>
%res = call i64 @llvm.vector.reduce.umax.v8i64(<8 x i64> %zext)
%ext = extractelement <8 x i64> %zext, i32 0
- store i64 %ext, i64* @glob1, align 8
+ store i64 %ext, ptr @glob1, align 8
ret i64 %res
}
; CHECK-NEXT: [[TMP3:%.*]] = sext i1 [[TMP2]] to i8
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <128 x i1> [[X]], i64 0
; CHECK-NEXT: [[EXT:%.*]] = sext i1 [[TMP4]] to i8
-; CHECK-NEXT: store i8 [[EXT]], i8* @glob, align 1
+; CHECK-NEXT: store i8 [[EXT]], ptr @glob, align 1
; CHECK-NEXT: ret i8 [[TMP3]]
;
%sext = sext <128 x i1> %x to <128 x i8>
%res = call i8 @llvm.vector.reduce.umin.v128i8(<128 x i8> %sext)
%ext = extractelement <128 x i8> %sext, i32 0
- store i8 %ext, i8* @glob, align 1
+ store i8 %ext, ptr @glob, align 1
ret i8 %res
}
; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP2]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <8 x i1> [[X]], i64 0
; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[TMP4]] to i64
-; CHECK-NEXT: store i64 [[EXT]], i64* @glob1, align 8
+; CHECK-NEXT: store i64 [[EXT]], ptr @glob1, align 8
; CHECK-NEXT: ret i64 [[TMP3]]
;
%zext = zext <8 x i1> %x to <8 x i64>
%res = call i64 @llvm.vector.reduce.umin.v8i64(<8 x i64> %zext)
%ext = extractelement <8 x i64> %zext, i32 0
- store i64 %ext, i64* @glob1, align 8
+ store i64 %ext, ptr @glob1, align 8
ret i64 %res
}
; CHECK-NEXT: [[TMP5:%.*]] = sub nsw i8 0, [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <128 x i1> [[X]], i64 0
; CHECK-NEXT: [[EXT:%.*]] = sext i1 [[TMP6]] to i8
-; CHECK-NEXT: store i8 [[EXT]], i8* @glob, align 1
+; CHECK-NEXT: store i8 [[EXT]], ptr @glob, align 1
; CHECK-NEXT: ret i8 [[TMP5]]
;
%sext = sext <128 x i1> %x to <128 x i8>
%res = call i8 @llvm.vector.reduce.xor.v128i8(<128 x i8> %sext)
%ext = extractelement <128 x i8> %sext, i32 0
- store i8 %ext, i8* @glob, align 1
+ store i8 %ext, ptr @glob, align 1
ret i8 %res
}
; CHECK-NEXT: [[TMP4:%.*]] = zext i8 [[TMP3]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <8 x i1> [[X]], i64 0
; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[TMP5]] to i64
-; CHECK-NEXT: store i64 [[EXT]], i64* @glob1, align 8
+; CHECK-NEXT: store i64 [[EXT]], ptr @glob1, align 8
; CHECK-NEXT: ret i64 [[TMP4]]
;
%zext = zext <8 x i1> %x to <8 x i64>
%res = call i64 @llvm.vector.reduce.xor.v8i64(<8 x i64> %zext)
%ext = extractelement <8 x i64> %zext, i32 0
- store i64 %ext, i64* @glob1, align 8
+ store i64 %ext, ptr @glob1, align 8
ret i64 %res
}
ret <2 x i64> %R
}
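; A binop of a phi with a constant can be evaluated on the incoming edges when
; every edge folds; below, srem by 5 folds on the constant edge
; (5 srem 5 == 0) and becomes [[PHI_BO]] on the loaded edge.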
-define i32 @test21(i1 %c0, i32* %p) {
+define i32 @test21(i1 %c0, ptr %p) {
; CHECK-LABEL: @test21(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C0:%.*]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: [[V:%.*]] = load volatile i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT: [[V:%.*]] = load volatile i32, ptr [[P:%.*]], align 4
; CHECK-NEXT: [[PHI_BO:%.*]] = srem i32 [[V]], 5
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
br i1 %c0, label %if.then, label %if.end
if.then:
- %v = load volatile i32, i32* %p
+ %v = load volatile i32, ptr %p
br label %if.end
if.end:
@a = common global [5 x i16] zeroinitializer, align 2
@b = common global i16 0, align 2
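; pr27968: the rem divisor below is the constant expression zext(icmp eq ...),
; which is 1 exactly when the guarding branch is taken, so the remainder in
; rem.is.safe folds to 0; the rem must not be hoisted above the guard, where
; the divisor could be 0.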
-define i32 @pr27968_0(i1 %c0, i32* %p) {
+define i32 @pr27968_0(i1 %c0, ptr %p) {
; CHECK-LABEL: @pr27968_0(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C0:%.*]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: [[V:%.*]] = load volatile i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT: [[V:%.*]] = load volatile i32, ptr [[P:%.*]], align 4
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
-; CHECK-NEXT: br i1 icmp eq (i16* getelementptr inbounds ([5 x i16], [5 x i16]* @a, i64 0, i64 4), i16* @b), label [[REM_IS_SAFE:%.*]], label [[REM_IS_UNSAFE:%.*]]
+; CHECK-NEXT: br i1 icmp eq (ptr getelementptr inbounds ([5 x i16], ptr @a, i64 0, i64 4), ptr @b), label [[REM_IS_SAFE:%.*]], label [[REM_IS_UNSAFE:%.*]]
; CHECK: rem.is.safe:
; CHECK-NEXT: ret i32 0
; CHECK: rem.is.unsafe:
br i1 %c0, label %if.then, label %if.end
if.then:
- %v = load volatile i32, i32* %p
+ %v = load volatile i32, ptr %p
br label %if.end
if.end:
%lhs = phi i32 [ %v, %if.then ], [ 5, %entry ]
- br i1 icmp eq (i16* getelementptr inbounds ([5 x i16], [5 x i16]* @a, i64 0, i64 4), i16* @b), label %rem.is.safe, label %rem.is.unsafe
+ br i1 icmp eq (ptr getelementptr inbounds ([5 x i16], ptr @a, i64 0, i64 4), ptr @b), label %rem.is.safe, label %rem.is.unsafe
rem.is.safe:
- %rem = srem i32 %lhs, zext (i1 icmp eq (i16* getelementptr inbounds ([5 x i16], [5 x i16]* @a, i64 0, i64 4), i16* @b) to i32)
+ %rem = srem i32 %lhs, zext (i1 icmp eq (ptr getelementptr inbounds ([5 x i16], ptr @a, i64 0, i64 4), ptr @b) to i32)
ret i32 %rem
rem.is.unsafe:
ret i32 0
}
-define i32 @pr27968_1(i1 %c0, i1 %always_false, i32* %p) {
+define i32 @pr27968_1(i1 %c0, i1 %always_false, ptr %p) {
; CHECK-LABEL: @pr27968_1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C0:%.*]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: [[V:%.*]] = load volatile i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT: [[V:%.*]] = load volatile i32, ptr [[P:%.*]], align 4
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
; CHECK-NEXT: [[LHS:%.*]] = phi i32 [ [[V]], [[IF_THEN]] ], [ 5, [[ENTRY:%.*]] ]
br i1 %c0, label %if.then, label %if.end
if.then:
- %v = load volatile i32, i32* %p
+ %v = load volatile i32, ptr %p
br label %if.end
if.end:
ret i32 0
}
-define i32 @pr27968_2(i1 %c0, i32* %p) {
+define i32 @pr27968_2(i1 %c0, ptr %p) {
; CHECK-LABEL: @pr27968_2(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C0:%.*]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: [[V:%.*]] = load volatile i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT: [[V:%.*]] = load volatile i32, ptr [[P:%.*]], align 4
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
-; CHECK-NEXT: br i1 icmp eq (i16* getelementptr inbounds ([5 x i16], [5 x i16]* @a, i64 0, i64 4), i16* @b), label [[REM_IS_SAFE:%.*]], label [[REM_IS_UNSAFE:%.*]]
+; CHECK-NEXT: br i1 icmp eq (ptr getelementptr inbounds ([5 x i16], ptr @a, i64 0, i64 4), ptr @b), label [[REM_IS_SAFE:%.*]], label [[REM_IS_UNSAFE:%.*]]
; CHECK: rem.is.safe:
; CHECK-NEXT: ret i32 0
; CHECK: rem.is.unsafe:
br i1 %c0, label %if.then, label %if.end
if.then:
- %v = load volatile i32, i32* %p
+ %v = load volatile i32, ptr %p
br label %if.end
if.end:
%lhs = phi i32 [ %v, %if.then ], [ 5, %entry ]
- br i1 icmp eq (i16* getelementptr inbounds ([5 x i16], [5 x i16]* @a, i64 0, i64 4), i16* @b), label %rem.is.safe, label %rem.is.unsafe
+ br i1 icmp eq (ptr getelementptr inbounds ([5 x i16], ptr @a, i64 0, i64 4), ptr @b), label %rem.is.safe, label %rem.is.unsafe
rem.is.safe:
- %rem = urem i32 %lhs, zext (i1 icmp eq (i16* getelementptr inbounds ([5 x i16], [5 x i16]* @a, i64 0, i64 4), i16* @b) to i32)
+ %rem = urem i32 %lhs, zext (i1 icmp eq (ptr getelementptr inbounds ([5 x i16], ptr @a, i64 0, i64 4), ptr @b) to i32)
ret i32 %rem
rem.is.unsafe:
ret i32 0
}
-define i32 @pr27968_3(i1 %c0, i1 %always_false, i32* %p) {
+define i32 @pr27968_3(i1 %c0, i1 %always_false, ptr %p) {
; CHECK-LABEL: @pr27968_3(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C0:%.*]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: [[V:%.*]] = load volatile i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT: [[V:%.*]] = load volatile i32, ptr [[P:%.*]], align 4
; CHECK-NEXT: [[PHI_BO:%.*]] = and i32 [[V]], 2147483647
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
br i1 %c0, label %if.then, label %if.end
if.then:
- %v = load volatile i32, i32* %p
+ %v = load volatile i32, ptr %p
br label %if.end
if.end:
ret i1 %E
}
-define i1 @test27(i32 %A, i32* %remdst) {
+define i1 @test27(i32 %A, ptr %remdst) {
; CHECK-LABEL: @test27(
; CHECK-NEXT: [[B:%.*]] = srem i32 [[A:%.*]], -2147483648
-; CHECK-NEXT: store i32 [[B]], i32* [[REMDST:%.*]], align 1
+; CHECK-NEXT: store i32 [[B]], ptr [[REMDST:%.*]], align 1
; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[B]], 0
; CHECK-NEXT: ret i1 [[C]]
;
%B = srem i32 %A, 2147483648 ; signbit
- store i32 %B, i32* %remdst, align 1 ; extra use of rem
+ store i32 %B, ptr %remdst, align 1 ; extra use of rem
%C = icmp ne i32 %B, 0
ret i1 %C
}
ret double %f_prod.0.lcssa
}
-define i32 @test_int_phi_operands(i32* %arr_d) {
+define i32 @test_int_phi_operands(ptr %arr_d) {
; CHECK-LABEL: @test_int_phi_operands(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
for.body: ; preds = %entry, %for.body
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%f_prod.01 = phi i32 [ 0, %entry ], [ %mul, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %arr_d, i64 %i.02
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %arr_d, i64 %i.02
+ %0 = load i32, ptr %arrayidx, align 4
%mul = mul nsw i32 %f_prod.01, %0
%inc = add i64 %i.02, 1
%cmp = icmp ult i64 %inc, 1000
ret i32 %f_prod.0.lcssa
}
-define i32 @test_int_phi_operands_initalise_to_non_zero(i32* %arr_d) {
+define i32 @test_int_phi_operands_initalise_to_non_zero(ptr %arr_d) {
; CHECK-LABEL: @test_int_phi_operands_initalise_to_non_zero(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
for.body: ; preds = %entry, %for.body
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%f_prod.01 = phi i32 [ 1, %entry ], [ %mul, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %arr_d, i64 %i.02
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %arr_d, i64 %i.02
+ %0 = load i32, ptr %arrayidx, align 4
%mul = mul i32 %f_prod.01, %0
%inc = add i64 %i.02, 1
%cmp = icmp ult i64 %inc, 1000
ret i32 %f_prod.0.lcssa
}
-define i32 @test_multiple_int_phi_operands(i32* %arr_d, i1 %entry_cond) {
+define i32 @test_multiple_int_phi_operands(ptr %arr_d, i1 %entry_cond) {
; CHECK-LABEL: @test_multiple_int_phi_operands(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[ENTRY_COND:%.*]], label [[FOR_BODY:%.*]], label [[ENTRY_2:%.*]]
for.body: ; preds = %entry, %entry_2, %for.body
%i.02 = phi i64 [ 0, %entry ], [ 0, %entry_2 ], [ %inc, %for.body ]
%f_prod.01 = phi i32 [ 0, %entry ], [ 0, %entry_2 ], [ %mul, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %arr_d, i64 %i.02
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %arr_d, i64 %i.02
+ %0 = load i32, ptr %arrayidx, align 4
%mul = mul i32 %f_prod.01, %0
%inc = add i64 %i.02, 1
%cmp = icmp ult i64 %inc, 1000
ret i32 %f_prod.0.lcssa
}
-define i32 @test_multiple_int_phi_operands_initalise_to_non_zero(i32* %arr_d, i1 %entry_cond) {
+define i32 @test_multiple_int_phi_operands_initalise_to_non_zero(ptr %arr_d, i1 %entry_cond) {
; CHECK-LABEL: @test_multiple_int_phi_operands_initalise_to_non_zero(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[ENTRY_COND:%.*]], label [[FOR_BODY:%.*]], label [[ENTRY_2:%.*]]
for.body: ; preds = %entry, %entry_2, %for.body
%i.02 = phi i64 [ 0, %entry ], [ 0, %entry_2 ], [ %inc, %for.body ]
%f_prod.01 = phi i32 [ 0, %entry ], [ 1, %entry_2 ], [ %mul, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %arr_d, i64 %i.02
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %arr_d, i64 %i.02
+ %0 = load i32, ptr %arrayidx, align 4
%mul = mul i32 %f_prod.01, %0
%inc = add i64 %i.02, 1
%cmp = icmp ult i64 %inc, 1000
;-------------------------------------------------------------------------------
-define i1 @t10(i64 %base, i64* nonnull %offsetptr) {
+define i1 @t10(i64 %base, ptr nonnull %offsetptr) {
; CHECK-LABEL: @t10(
-; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
+; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]])
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ule i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
- %offset = ptrtoint i64* %offsetptr to i64
+ %offset = ptrtoint ptr %offsetptr to i64
%adjusted = sub i64 %base, %offset
call void @use64(i64 %adjusted)
ret i1 %r
}
-define i1 @t10_logical(i64 %base, i64* nonnull %offsetptr) {
+define i1 @t10_logical(i64 %base, ptr nonnull %offsetptr) {
; CHECK-LABEL: @t10_logical(
-; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
+; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]])
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ule i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
- %offset = ptrtoint i64* %offsetptr to i64
+ %offset = ptrtoint ptr %offsetptr to i64
%adjusted = sub i64 %base, %offset
call void @use64(i64 %adjusted)
%r = select i1 %not_null, i1 %no_underflow, i1 false
ret i1 %r
}
-define i1 @t11_commutative(i64 %base, i64* nonnull %offsetptr) {
+define i1 @t11_commutative(i64 %base, ptr nonnull %offsetptr) {
; CHECK-LABEL: @t11_commutative(
-; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
+; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]])
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ule i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
- %offset = ptrtoint i64* %offsetptr to i64
+ %offset = ptrtoint ptr %offsetptr to i64
%adjusted = sub i64 %base, %offset
call void @use64(i64 %adjusted)
ret i1 %r
}
-define i1 @t11_commutative_logical(i64 %base, i64* nonnull %offsetptr) {
+define i1 @t11_commutative_logical(i64 %base, ptr nonnull %offsetptr) {
; CHECK-LABEL: @t11_commutative_logical(
-; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
+; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]])
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ule i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
- %offset = ptrtoint i64* %offsetptr to i64
+ %offset = ptrtoint ptr %offsetptr to i64
%adjusted = sub i64 %base, %offset
call void @use64(i64 %adjusted)
ret i1 %r
}
-define i1 @t12(i64 %base, i64* nonnull %offsetptr) {
+define i1 @t12(i64 %base, ptr nonnull %offsetptr) {
; CHECK-LABEL: @t12(
-; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
+; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]])
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ugt i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp uge i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
- %offset = ptrtoint i64* %offsetptr to i64
+ %offset = ptrtoint ptr %offsetptr to i64
%adjusted = sub i64 %base, %offset
call void @use64(i64 %adjusted)
ret i1 %r
}
-define i1 @t12_logical(i64 %base, i64* nonnull %offsetptr) {
+define i1 @t12_logical(i64 %base, ptr nonnull %offsetptr) {
; CHECK-LABEL: @t12_logical(
-; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
+; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]])
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ugt i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp uge i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
- %offset = ptrtoint i64* %offsetptr to i64
+ %offset = ptrtoint ptr %offsetptr to i64
%adjusted = sub i64 %base, %offset
call void @use64(i64 %adjusted)
%r = select i1 %not_null, i1 true, i1 %no_underflow
ret i1 %r
}
-define i1 @t13(i64 %base, i64* nonnull %offsetptr) {
+define i1 @t13(i64 %base, ptr nonnull %offsetptr) {
; CHECK-LABEL: @t13(
-; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
+; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]])
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ugt i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp uge i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
- %offset = ptrtoint i64* %offsetptr to i64
+ %offset = ptrtoint ptr %offsetptr to i64
%adjusted = sub i64 %base, %offset
call void @use64(i64 %adjusted)
ret i1 %r
}
-define i1 @t13_logical(i64 %base, i64* nonnull %offsetptr) {
+define i1 @t13_logical(i64 %base, ptr nonnull %offsetptr) {
; CHECK-LABEL: @t13_logical(
-; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
+; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]])
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ugt i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp uge i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
- %offset = ptrtoint i64* %offsetptr to i64
+ %offset = ptrtoint ptr %offsetptr to i64
%adjusted = sub i64 %base, %offset
call void @use64(i64 %adjusted)
; Extra uses don't change anything.
-define i16 @rotl_safe_i16_commute_extra_use(i16 %x, i16 %y, i16* %p) {
+define i16 @rotl_safe_i16_commute_extra_use(i16 %x, i16 %y, ptr %p) {
; CHECK-LABEL: @rotl_safe_i16_commute_extra_use(
; CHECK-NEXT: [[NEGY:%.*]] = sub i16 0, [[Y:%.*]]
; CHECK-NEXT: [[NEGYMASK:%.*]] = and i16 [[NEGY]], 15
-; CHECK-NEXT: store i16 [[NEGYMASK]], i16* [[P:%.*]], align 2
+; CHECK-NEXT: store i16 [[NEGYMASK]], ptr [[P:%.*]], align 2
; CHECK-NEXT: [[R:%.*]] = call i16 @llvm.fshl.i16(i16 [[X:%.*]], i16 [[X]], i16 [[Y]])
; CHECK-NEXT: ret i16 [[R]]
;
%negy = sub i16 0, %y
%ymask = and i16 %y, 15
%negymask = and i16 %negy, 15
- store i16 %negymask, i16* %p
+ store i16 %negymask, ptr %p
%shl = shl i16 %x, %ymask
%shr = lshr i16 %x, %negymask
%r = or i16 %shl, %shr
; Extra uses don't change anything.
-define i8 @rotr_safe_i8_commute_extra_use(i8 %x, i8 %y, i8* %p) {
+define i8 @rotr_safe_i8_commute_extra_use(i8 %x, i8 %y, ptr %p) {
; CHECK-LABEL: @rotr_safe_i8_commute_extra_use(
; CHECK-NEXT: [[NEGY:%.*]] = sub i8 0, [[Y:%.*]]
; CHECK-NEXT: [[YMASK:%.*]] = and i8 [[Y]], 7
; CHECK-NEXT: [[NEGYMASK:%.*]] = and i8 [[NEGY]], 7
; CHECK-NEXT: [[SHL:%.*]] = shl i8 [[X:%.*]], [[NEGYMASK]]
; CHECK-NEXT: [[SHR:%.*]] = lshr i8 [[X]], [[YMASK]]
-; CHECK-NEXT: store i8 [[SHR]], i8* [[P:%.*]], align 1
+; CHECK-NEXT: store i8 [[SHR]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[R:%.*]] = or i8 [[SHL]], [[SHR]]
; CHECK-NEXT: ret i8 [[R]]
;
%negymask = and i8 %negy, 7
%shl = shl i8 %x, %negymask
%shr = lshr i8 %x, %ymask
- store i8 %shr, i8* %p
+ store i8 %shr, ptr %p
%r = or i8 %shl, %shr
ret i8 %r
}
define i32 @rotl_constant_expr(i32 %shamt) {
; CHECK-LABEL: @rotl_constant_expr(
-; CHECK-NEXT: [[SHR:%.*]] = lshr i32 ptrtoint (i8* @external_global to i32), [[SHAMT:%.*]]
-; CHECK-NEXT: [[R:%.*]] = or i32 [[SHR]], shl (i32 ptrtoint (i8* @external_global to i32), i32 11)
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 ptrtoint (ptr @external_global to i32), [[SHAMT:%.*]]
+; CHECK-NEXT: [[R:%.*]] = or i32 [[SHR]], shl (i32 ptrtoint (ptr @external_global to i32), i32 11)
; CHECK-NEXT: ret i32 [[R]]
;
- %shr = lshr i32 ptrtoint (i8* @external_global to i32), %shamt
- %r = or i32 %shr, shl (i32 ptrtoint (i8* @external_global to i32), i32 11)
+ %shr = lshr i32 ptrtoint (ptr @external_global to i32), %shamt
+ %r = or i32 %shr, shl (i32 ptrtoint (ptr @external_global to i32), i32 11)
ret i32 %r
}
; RUN: opt -passes=instcombine -S -o - %s | FileCheck %s
-declare dso_local i32 @bar(i8*)
+declare dso_local i32 @bar(ptr)
; Function Attrs: nounwind
define internal i32 @foo() #0 !dbg !1 {
; CHECK: %[[VLA:.*]] = alloca [2 x i32]
-; CHECK: call void @llvm.dbg.declare(metadata [2 x i32]* %[[VLA]], {{.*}}, metadata !DIExpression())
+; CHECK: call void @llvm.dbg.declare(metadata ptr %[[VLA]], {{.*}}, metadata !DIExpression())
entry:
%vla = alloca i32, i64 2, align 4, !dbg !16
- call void @llvm.dbg.declare(metadata i32* %vla, metadata !19, metadata !DIExpression()), !dbg !20
- %0 = bitcast i32* %vla to i8*, !dbg !21
- %call = call i32 @bar(i8* %0), !dbg !22
+ call void @llvm.dbg.declare(metadata ptr %vla, metadata !19, metadata !DIExpression()), !dbg !20
+ %call = call i32 @bar(ptr %vla), !dbg !22
unreachable
}
ret i8 %res
}
-define i8 @test_scalar_usub_add_extra_use(i8 %a, i8 %b, i8* %p) {
+define i8 @test_scalar_usub_add_extra_use(i8 %a, i8 %b, ptr %p) {
; CHECK-LABEL: @test_scalar_usub_add_extra_use(
; CHECK-NEXT: [[SAT:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 [[B:%.*]])
-; CHECK-NEXT: store i8 [[SAT]], i8* [[P:%.*]], align 1
+; CHECK-NEXT: store i8 [[SAT]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[RES:%.*]] = add i8 [[SAT]], [[B]]
; CHECK-NEXT: ret i8 [[RES]]
;
%sat = call i8 @llvm.usub.sat.i8(i8 %a, i8 %b)
- store i8 %sat, i8* %p
+ store i8 %sat, ptr %p
%res = add i8 %sat, %b
ret i8 %res
}
ret i8 %res
}
-define i8 @test_scalar_usub_sub_extra_use(i8 %a, i8 %b, i8* %p) {
+define i8 @test_scalar_usub_sub_extra_use(i8 %a, i8 %b, ptr %p) {
; CHECK-LABEL: @test_scalar_usub_sub_extra_use(
; CHECK-NEXT: [[SAT:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 [[B:%.*]])
-; CHECK-NEXT: store i8 [[SAT]], i8* [[P:%.*]], align 1
+; CHECK-NEXT: store i8 [[SAT]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[RES:%.*]] = sub i8 [[A]], [[SAT]]
; CHECK-NEXT: ret i8 [[RES]]
;
%sat = call i8 @llvm.usub.sat.i8(i8 %a, i8 %b)
- store i8 %sat, i8* %p
+ store i8 %sat, ptr %p
%res = sub i8 %a, %sat
ret i8 %res
}
ret i8 %res
}
-define i8 @test_scalar_uadd_sub_extra_use(i8 %a, i8 %b, i8* %p) {
+define i8 @test_scalar_uadd_sub_extra_use(i8 %a, i8 %b, ptr %p) {
; CHECK-LABEL: @test_scalar_uadd_sub_extra_use(
; CHECK-NEXT: [[SAT:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A:%.*]], i8 [[B:%.*]])
-; CHECK-NEXT: store i8 [[SAT]], i8* [[P:%.*]], align 1
+; CHECK-NEXT: store i8 [[SAT]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[RES:%.*]] = sub i8 [[SAT]], [[B]]
; CHECK-NEXT: ret i8 [[RES]]
;
%sat = call i8 @llvm.uadd.sat.i8(i8 %a, i8 %b)
- store i8 %sat, i8* %p
+ store i8 %sat, ptr %p
%res = sub i8 %sat, %b
ret i8 %res
}
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
-define void @trunc_nxv2i64_to_nxv2i32(i32* %ptr, <vscale x 4 x i32> %v) {
+define void @trunc_nxv2i64_to_nxv2i32(ptr %ptr, <vscale x 4 x i32> %v) {
; CHECK-LABEL: @trunc_nxv2i64_to_nxv2i32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <vscale x 4 x i32> [[V:%.*]] to <vscale x 2 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[TMP0]])
; CHECK-NEXT: [[TMP3:%.*]] = trunc <vscale x 2 x i64> [[TMP1]] to <vscale x 2 x i32>
-; CHECK-NEXT: call void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32> [[TMP3]], <vscale x 2 x i1> [[TMP2]], i32* [[PTR:%.*]])
+; CHECK-NEXT: call void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32> [[TMP3]], <vscale x 2 x i1> [[TMP2]], ptr [[PTR:%.*]])
; CHECK-NEXT: ret void
;
entry:
%1 = bitcast <vscale x 4 x i32> %v to <vscale x 2 x i64>
%2 = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %0)
%3 = trunc <vscale x 2 x i64> %1 to <vscale x 2 x i32>
- call void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32> %3, <vscale x 2 x i1> %2, i32* %ptr)
+ call void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32> %3, <vscale x 2 x i1> %2, ptr %ptr)
ret void
}
-declare void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32*)
+declare void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, ptr)
declare <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 %pattern)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
-define i32 @extract_load(<4 x i32>* %p) {
+define i32 @extract_load(ptr %p) {
;
; CHECK-LABEL: @extract_load(
-; CHECK-NEXT: [[X:%.*]] = load <4 x i32>, <4 x i32>* [[P:%.*]], align 4
+; CHECK-NEXT: [[X:%.*]] = load <4 x i32>, ptr [[P:%.*]], align 4
; CHECK-NEXT: [[EXT:%.*]] = extractelement <4 x i32> [[X]], i64 1
; CHECK-NEXT: ret i32 [[EXT]]
;
- %x = load <4 x i32>, <4 x i32>* %p, align 4
+ %x = load <4 x i32>, ptr %p, align 4
%ext = extractelement <4 x i32> %x, i32 1
ret i32 %ext
}
-define double @extract_load_fp(<4 x double>* %p) {
+define double @extract_load_fp(ptr %p) {
;
; CHECK-LABEL: @extract_load_fp(
-; CHECK-NEXT: [[X:%.*]] = load <4 x double>, <4 x double>* [[P:%.*]], align 32
+; CHECK-NEXT: [[X:%.*]] = load <4 x double>, ptr [[P:%.*]], align 32
; CHECK-NEXT: [[EXT:%.*]] = extractelement <4 x double> [[X]], i64 3
; CHECK-NEXT: ret double [[EXT]]
;
- %x = load <4 x double>, <4 x double>* %p, align 32
+ %x = load <4 x double>, ptr %p, align 32
%ext = extractelement <4 x double> %x, i32 3
ret double %ext
}
-define double @extract_load_volatile(<4 x double>* %p) {
+define double @extract_load_volatile(ptr %p) {
;
; CHECK-LABEL: @extract_load_volatile(
-; CHECK-NEXT: [[X:%.*]] = load volatile <4 x double>, <4 x double>* [[P:%.*]], align 32
+; CHECK-NEXT: [[X:%.*]] = load volatile <4 x double>, ptr [[P:%.*]], align 32
; CHECK-NEXT: [[EXT:%.*]] = extractelement <4 x double> [[X]], i64 2
; CHECK-NEXT: ret double [[EXT]]
;
- %x = load volatile <4 x double>, <4 x double>* %p
+ %x = load volatile <4 x double>, ptr %p
%ext = extractelement <4 x double> %x, i32 2
ret double %ext
}
-define double @extract_load_extra_use(<4 x double>* %p, <4 x double>* %p2) {
+define double @extract_load_extra_use(ptr %p, ptr %p2) {
;
; CHECK-LABEL: @extract_load_extra_use(
-; CHECK-NEXT: [[X:%.*]] = load <4 x double>, <4 x double>* [[P:%.*]], align 8
+; CHECK-NEXT: [[X:%.*]] = load <4 x double>, ptr [[P:%.*]], align 8
; CHECK-NEXT: [[EXT:%.*]] = extractelement <4 x double> [[X]], i64 0
-; CHECK-NEXT: store <4 x double> [[X]], <4 x double>* [[P2:%.*]], align 32
+; CHECK-NEXT: store <4 x double> [[X]], ptr [[P2:%.*]], align 32
; CHECK-NEXT: ret double [[EXT]]
;
- %x = load <4 x double>, <4 x double>* %p, align 8
+ %x = load <4 x double>, ptr %p, align 8
%ext = extractelement <4 x double> %x, i32 0
- store <4 x double> %x, <4 x double>* %p2
+ store <4 x double> %x, ptr %p2
ret double %ext
}
-define double @extract_load_variable_index(<4 x double>* %p, i32 %y) {
+define double @extract_load_variable_index(ptr %p, i32 %y) {
;
; CHECK-LABEL: @extract_load_variable_index(
-; CHECK-NEXT: [[X:%.*]] = load <4 x double>, <4 x double>* [[P:%.*]], align 32
+; CHECK-NEXT: [[X:%.*]] = load <4 x double>, ptr [[P:%.*]], align 32
; CHECK-NEXT: [[EXT:%.*]] = extractelement <4 x double> [[X]], i32 [[Y:%.*]]
; CHECK-NEXT: ret double [[EXT]]
;
- %x = load <4 x double>, <4 x double>* %p
+ %x = load <4 x double>, ptr %p
%ext = extractelement <4 x double> %x, i32 %y
ret double %ext
}
-define void @scalarize_phi(i32 * %n, float * %inout) {
+define void @scalarize_phi(ptr %n, ptr %inout) {
;
; CHECK-LABEL: @scalarize_phi(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[T0:%.*]] = load volatile float, float* [[INOUT:%.*]], align 4
+; CHECK-NEXT: [[T0:%.*]] = load volatile float, ptr [[INOUT:%.*]], align 4
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK: for.cond:
; CHECK-NEXT: [[TMP0:%.*]] = phi float [ [[T0]], [[ENTRY:%.*]] ], [ [[TMP1:%.*]], [[FOR_BODY:%.*]] ]
; CHECK-NEXT: [[I_0:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[T1:%.*]] = load i32, i32* [[N:%.*]], align 4
+; CHECK-NEXT: [[T1:%.*]] = load i32, ptr [[N:%.*]], align 4
; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[I_0]], [[T1]]
; CHECK-NEXT: br i1 [[CMP_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.body:
-; CHECK-NEXT: store volatile float [[TMP0]], float* [[INOUT]], align 4
+; CHECK-NEXT: store volatile float [[TMP0]], ptr [[INOUT]], align 4
; CHECK-NEXT: [[TMP1]] = fmul float [[TMP0]], 0x4002A3D700000000
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_0]], 1
; CHECK-NEXT: br label [[FOR_COND]]
; CHECK-NEXT: ret void
;
entry:
- %t0 = load volatile float, float * %inout, align 4
+ %t0 = load volatile float, ptr %inout, align 4
%insert = insertelement <4 x float> poison, float %t0, i32 0
%splat = shufflevector <4 x float> %insert, <4 x float> poison, <4 x i32> zeroinitializer
%insert1 = insertelement <4 x float> poison, float 3.0, i32 0
for.cond:
%x.0 = phi <4 x float> [ %splat, %entry ], [ %mul, %for.body ]
%i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %t1 = load i32, i32 * %n, align 4
+ %t1 = load i32, ptr %n, align 4
%cmp = icmp ne i32 %i.0, %t1
br i1 %cmp, label %for.body, label %for.end
for.body:
%t2 = extractelement <4 x float> %x.0, i32 1
- store volatile float %t2, float * %inout, align 4
+ store volatile float %t2, ptr %inout, align 4
%mul = fmul <4 x float> %x.0, <float 0x4002A3D700000000, float 0x4002A3D700000000, float 0x4002A3D700000000, float 0x4002A3D700000000>
%inc = add nsw i32 %i.0, 1
br label %for.cond
ret i8 %r
}
-define float @extract_element_load(<4 x float> %x, <4 x float>* %ptr) {
+define float @extract_element_load(<4 x float> %x, ptr %ptr) {
;
; CHECK-LABEL: @extract_element_load(
-; CHECK-NEXT: [[LOAD:%.*]] = load <4 x float>, <4 x float>* [[PTR:%.*]], align 16
+; CHECK-NEXT: [[LOAD:%.*]] = load <4 x float>, ptr [[PTR:%.*]], align 16
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[LOAD]], i64 2
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[X:%.*]], i64 2
; CHECK-NEXT: [[R:%.*]] = fadd float [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret float [[R]]
;
- %load = load <4 x float>, <4 x float>* %ptr
+ %load = load <4 x float>, ptr %ptr
%add = fadd <4 x float> %x, %load
%r = extractelement <4 x float> %add, i32 2
ret float %r
}
-define float @extract_element_multi_Use_load(<4 x float> %x, <4 x float>* %ptr0, <4 x float>* %ptr1) {
+define float @extract_element_multi_Use_load(<4 x float> %x, ptr %ptr0, ptr %ptr1) {
;
; CHECK-LABEL: @extract_element_multi_Use_load(
-; CHECK-NEXT: [[LOAD:%.*]] = load <4 x float>, <4 x float>* [[PTR0:%.*]], align 16
-; CHECK-NEXT: store <4 x float> [[LOAD]], <4 x float>* [[PTR1:%.*]], align 16
+; CHECK-NEXT: [[LOAD:%.*]] = load <4 x float>, ptr [[PTR0:%.*]], align 16
+; CHECK-NEXT: store <4 x float> [[LOAD]], ptr [[PTR1:%.*]], align 16
; CHECK-NEXT: [[ADD:%.*]] = fadd <4 x float> [[LOAD]], [[X:%.*]]
; CHECK-NEXT: [[R:%.*]] = extractelement <4 x float> [[ADD]], i64 2
; CHECK-NEXT: ret float [[R]]
;
- %load = load <4 x float>, <4 x float>* %ptr0
- store <4 x float> %load, <4 x float>* %ptr1
+ %load = load <4 x float>, ptr %ptr0
+ store <4 x float> %load, ptr %ptr1
%add = fadd <4 x float> %x, %load
%r = extractelement <4 x float> %add, i32 2
ret float %r
;
; CHECK-LABEL: @extractelt_vector_fcmp_not_cheap_to_scalarize_multi_use(
; CHECK-NEXT: [[ADD:%.*]] = fadd <2 x float> [[ARG1:%.*]], [[ARG2:%.*]]
-; CHECK-NEXT: store volatile <2 x float> [[ADD]], <2 x float>* undef, align 8
+; CHECK-NEXT: store volatile <2 x float> [[ADD]], ptr undef, align 8
; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <2 x float> [[ADD]], [[ARG0:%.*]]
; CHECK-NEXT: [[EXT:%.*]] = extractelement <2 x i1> [[CMP]], i64 0
; CHECK-NEXT: ret i1 [[EXT]]
;
%add = fadd <2 x float> %arg1, %arg2
- store volatile <2 x float> %add, <2 x float>* undef
+ store volatile <2 x float> %add, ptr undef
%cmp = fcmp oeq <2 x float> %arg0, %add
%ext = extractelement <2 x i1> %cmp, i32 0
ret i1 %ext
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
-define i32 @extract_load(<4 x i32>* %p) {
+define i32 @extract_load(ptr %p) {
;
; CHECK-LABEL: @extract_load(
-; CHECK-NEXT: [[X:%.*]] = load <4 x i32>, <4 x i32>* [[P:%.*]], align 4
+; CHECK-NEXT: [[X:%.*]] = load <4 x i32>, ptr [[P:%.*]], align 4
; CHECK-NEXT: [[EXT:%.*]] = extractelement <4 x i32> [[X]], i64 1
; CHECK-NEXT: ret i32 [[EXT]]
;
- %x = load <4 x i32>, <4 x i32>* %p, align 4
+ %x = load <4 x i32>, ptr %p, align 4
%ext = extractelement <4 x i32> %x, i32 1
ret i32 %ext
}
-define double @extract_load_fp(<4 x double>* %p) {
+define double @extract_load_fp(ptr %p) {
;
; CHECK-LABEL: @extract_load_fp(
-; CHECK-NEXT: [[X:%.*]] = load <4 x double>, <4 x double>* [[P:%.*]], align 32
+; CHECK-NEXT: [[X:%.*]] = load <4 x double>, ptr [[P:%.*]], align 32
; CHECK-NEXT: [[EXT:%.*]] = extractelement <4 x double> [[X]], i64 3
; CHECK-NEXT: ret double [[EXT]]
;
- %x = load <4 x double>, <4 x double>* %p, align 32
+ %x = load <4 x double>, ptr %p, align 32
%ext = extractelement <4 x double> %x, i32 3
ret double %ext
}
-define double @extract_load_volatile(<4 x double>* %p) {
+define double @extract_load_volatile(ptr %p) {
;
; CHECK-LABEL: @extract_load_volatile(
-; CHECK-NEXT: [[X:%.*]] = load volatile <4 x double>, <4 x double>* [[P:%.*]], align 32
+; CHECK-NEXT: [[X:%.*]] = load volatile <4 x double>, ptr [[P:%.*]], align 32
; CHECK-NEXT: [[EXT:%.*]] = extractelement <4 x double> [[X]], i64 2
; CHECK-NEXT: ret double [[EXT]]
;
- %x = load volatile <4 x double>, <4 x double>* %p
+ %x = load volatile <4 x double>, ptr %p
%ext = extractelement <4 x double> %x, i32 2
ret double %ext
}
-define double @extract_load_extra_use(<4 x double>* %p, <4 x double>* %p2) {
+define double @extract_load_extra_use(ptr %p, ptr %p2) {
;
; CHECK-LABEL: @extract_load_extra_use(
-; CHECK-NEXT: [[X:%.*]] = load <4 x double>, <4 x double>* [[P:%.*]], align 8
+; CHECK-NEXT: [[X:%.*]] = load <4 x double>, ptr [[P:%.*]], align 8
; CHECK-NEXT: [[EXT:%.*]] = extractelement <4 x double> [[X]], i64 0
-; CHECK-NEXT: store <4 x double> [[X]], <4 x double>* [[P2:%.*]], align 32
+; CHECK-NEXT: store <4 x double> [[X]], ptr [[P2:%.*]], align 32
; CHECK-NEXT: ret double [[EXT]]
;
- %x = load <4 x double>, <4 x double>* %p, align 8
+ %x = load <4 x double>, ptr %p, align 8
%ext = extractelement <4 x double> %x, i32 0
- store <4 x double> %x, <4 x double>* %p2
+ store <4 x double> %x, ptr %p2
ret double %ext
}
-define double @extract_load_variable_index(<4 x double>* %p, i32 %y) {
+define double @extract_load_variable_index(ptr %p, i32 %y) {
;
; CHECK-LABEL: @extract_load_variable_index(
-; CHECK-NEXT: [[X:%.*]] = load <4 x double>, <4 x double>* [[P:%.*]], align 32
+; CHECK-NEXT: [[X:%.*]] = load <4 x double>, ptr [[P:%.*]], align 32
; CHECK-NEXT: [[EXT:%.*]] = extractelement <4 x double> [[X]], i32 [[Y:%.*]]
; CHECK-NEXT: ret double [[EXT]]
;
- %x = load <4 x double>, <4 x double>* %p
+ %x = load <4 x double>, ptr %p
%ext = extractelement <4 x double> %x, i32 %y
ret double %ext
}
-define void @scalarize_phi(i32 * %n, float * %inout) {
+define void @scalarize_phi(ptr %n, ptr %inout) {
;
; CHECK-LABEL: @scalarize_phi(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[T0:%.*]] = load volatile float, float* [[INOUT:%.*]], align 4
+; CHECK-NEXT: [[T0:%.*]] = load volatile float, ptr [[INOUT:%.*]], align 4
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK: for.cond:
; CHECK-NEXT: [[TMP0:%.*]] = phi float [ [[T0]], [[ENTRY:%.*]] ], [ [[TMP1:%.*]], [[FOR_BODY:%.*]] ]
; CHECK-NEXT: [[I_0:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[T1:%.*]] = load i32, i32* [[N:%.*]], align 4
+; CHECK-NEXT: [[T1:%.*]] = load i32, ptr [[N:%.*]], align 4
; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[I_0]], [[T1]]
; CHECK-NEXT: br i1 [[CMP_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.body:
-; CHECK-NEXT: store volatile float [[TMP0]], float* [[INOUT]], align 4
+; CHECK-NEXT: store volatile float [[TMP0]], ptr [[INOUT]], align 4
; CHECK-NEXT: [[TMP1]] = fmul float [[TMP0]], 0x4002A3D700000000
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_0]], 1
; CHECK-NEXT: br label [[FOR_COND]]
; CHECK-NEXT: ret void
;
entry:
- %t0 = load volatile float, float * %inout, align 4
+ %t0 = load volatile float, ptr %inout, align 4
%insert = insertelement <4 x float> undef, float %t0, i32 0
%splat = shufflevector <4 x float> %insert, <4 x float> undef, <4 x i32> zeroinitializer
%insert1 = insertelement <4 x float> undef, float 3.0, i32 0
for.cond:
%x.0 = phi <4 x float> [ %splat, %entry ], [ %mul, %for.body ]
%i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %t1 = load i32, i32 * %n, align 4
+ %t1 = load i32, ptr %n, align 4
%cmp = icmp ne i32 %i.0, %t1
br i1 %cmp, label %for.body, label %for.end
for.body:
%t2 = extractelement <4 x float> %x.0, i32 1
- store volatile float %t2, float * %inout, align 4
+ store volatile float %t2, ptr %inout, align 4
%mul = fmul <4 x float> %x.0, <float 0x4002A3D700000000, float 0x4002A3D700000000, float 0x4002A3D700000000, float 0x4002A3D700000000>
%inc = add nsw i32 %i.0, 1
br label %for.cond
ret i8 %r
}
-define float @extract_element_load(<4 x float> %x, <4 x float>* %ptr) {
+define float @extract_element_load(<4 x float> %x, ptr %ptr) {
;
; CHECK-LABEL: @extract_element_load(
-; CHECK-NEXT: [[LOAD:%.*]] = load <4 x float>, <4 x float>* [[PTR:%.*]], align 16
+; CHECK-NEXT: [[LOAD:%.*]] = load <4 x float>, ptr [[PTR:%.*]], align 16
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[LOAD]], i64 2
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[X:%.*]], i64 2
; CHECK-NEXT: [[R:%.*]] = fadd float [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret float [[R]]
;
- %load = load <4 x float>, <4 x float>* %ptr
+ %load = load <4 x float>, ptr %ptr
%add = fadd <4 x float> %x, %load
%r = extractelement <4 x float> %add, i32 2
ret float %r
}
-define float @extract_element_multi_Use_load(<4 x float> %x, <4 x float>* %ptr0, <4 x float>* %ptr1) {
+define float @extract_element_multi_Use_load(<4 x float> %x, ptr %ptr0, ptr %ptr1) {
;
; CHECK-LABEL: @extract_element_multi_Use_load(
-; CHECK-NEXT: [[LOAD:%.*]] = load <4 x float>, <4 x float>* [[PTR0:%.*]], align 16
-; CHECK-NEXT: store <4 x float> [[LOAD]], <4 x float>* [[PTR1:%.*]], align 16
+; CHECK-NEXT: [[LOAD:%.*]] = load <4 x float>, ptr [[PTR0:%.*]], align 16
+; CHECK-NEXT: store <4 x float> [[LOAD]], ptr [[PTR1:%.*]], align 16
; CHECK-NEXT: [[ADD:%.*]] = fadd <4 x float> [[LOAD]], [[X:%.*]]
; CHECK-NEXT: [[R:%.*]] = extractelement <4 x float> [[ADD]], i64 2
; CHECK-NEXT: ret float [[R]]
;
- %load = load <4 x float>, <4 x float>* %ptr0
- store <4 x float> %load, <4 x float>* %ptr1
+ %load = load <4 x float>, ptr %ptr0
+ store <4 x float> %load, ptr %ptr1
%add = fadd <4 x float> %x, %load
%r = extractelement <4 x float> %add, i32 2
ret float %r
;
; CHECK-LABEL: @extractelt_vector_fcmp_not_cheap_to_scalarize_multi_use(
; CHECK-NEXT: [[ADD:%.*]] = fadd <2 x float> [[ARG1:%.*]], [[ARG2:%.*]]
-; CHECK-NEXT: store volatile <2 x float> [[ADD]], <2 x float>* undef, align 8
+; CHECK-NEXT: store volatile <2 x float> [[ADD]], ptr undef, align 8
; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <2 x float> [[ADD]], [[ARG0:%.*]]
; CHECK-NEXT: [[EXT:%.*]] = extractelement <2 x i1> [[CMP]], i64 0
; CHECK-NEXT: ret i1 [[EXT]]
;
%add = fadd <2 x float> %arg1, %arg2
- store volatile <2 x float> %add, <2 x float>* undef
+ store volatile <2 x float> %add, ptr undef
%cmp = fcmp oeq <2 x float> %arg0, %add
%ext = extractelement <2 x i1> %cmp, i32 0
ret i1 %ext
; currently opt folds (sub nsw i64 0, constexpr) -> (sub i64 0, constexpr).
; sdiv canonicalize requires a nsw sub.
; CHECK-LABEL: @test_sdiv_canonicalize_constexpr(
-; CHECK-NEXT: [[B4:%.*]] = sdiv i64 [[L1:%.*]], sub (i64 0, i64 ptrtoint (i32* @X to i64))
+; CHECK-NEXT: [[B4:%.*]] = sdiv i64 [[L1:%.*]], sub (i64 0, i64 ptrtoint (ptr @X to i64))
; CHECK-NEXT: ret i64 [[B4]]
;
- %v1 = ptrtoint i32* @X to i64
+ %v1 = ptrtoint ptr @X to i64
%B8 = sub nsw i64 0, %v1
%B4 = sdiv i64 %L1, %B8
ret i64 %B4
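; A minimal sketch of the nsw-gated fold referenced above (hypothetical
; function name, not part of the test file): when the operand negation is
; known not to wrap, InstCombine can hoist it out of the division.
define i64 @sdiv_neg_sketch(i64 %x, i64 %y) {
  %neg = sub nsw i64 0, %x   ; nsw: %x is provably not INT64_MIN
  %div = sdiv i64 %neg, %y   ; roughly folds to: %d = sdiv i64 %x, %y ; %div = sub nsw i64 0, %d
  ret i64 %div
}
; Without nsw, %x could be INT64_MIN, where the negation wraps back to itself
; and the rewritten form would give a different result. Constant folding strips
; nsw from the constant-expression sub in @test_sdiv_canonicalize_constexpr,
; so that sdiv stays unfolded.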
define i1 @demorgan_select_infloop1(i1 %L) {
; CHECK-LABEL: @demorgan_select_infloop1(
; CHECK-NEXT: [[NOT_L:%.*]] = xor i1 [[L:%.*]], true
-; CHECK-NEXT: [[C15:%.*]] = select i1 [[NOT_L]], i1 xor (i1 and (i1 icmp eq (i16* getelementptr inbounds (i16, i16* @g2, i64 1), i16* @g1), i1 icmp ne (i16* getelementptr inbounds (i16, i16* @g2, i64 1), i16* @g1)), i1 true), i1 false
+; CHECK-NEXT: [[C15:%.*]] = select i1 [[NOT_L]], i1 xor (i1 and (i1 icmp eq (ptr getelementptr inbounds (i16, ptr @g2, i64 1), ptr @g1), i1 icmp ne (ptr getelementptr inbounds (i16, ptr @g2, i64 1), ptr @g1)), i1 true), i1 false
; CHECK-NEXT: ret i1 [[C15]]
;
%not.L = xor i1 %L, true
- %C15 = select i1 %not.L, i1 xor (i1 and (i1 icmp eq (i16* getelementptr inbounds (i16, i16* @g2, i64 1), i16* @g1), i1 icmp ne (i16* getelementptr inbounds (i16, i16* @g2, i64 1), i16* @g1)), i1 true), i1 false
+ %C15 = select i1 %not.L, i1 xor (i1 and (i1 icmp eq (ptr getelementptr inbounds (i16, ptr @g2, i64 1), ptr @g1), i1 icmp ne (ptr getelementptr inbounds (i16, ptr @g2, i64 1), ptr @g1)), i1 true), i1 false
ret i1 %C15
}
define i1 @demorgan_select_infloop2(i1 %L) {
; CHECK-LABEL: @demorgan_select_infloop2(
; CHECK-NEXT: [[NOT_L:%.*]] = xor i1 [[L:%.*]], true
-; CHECK-NEXT: [[C15:%.*]] = select i1 [[NOT_L]], i1 true, i1 xor (i1 and (i1 icmp eq (i16* getelementptr inbounds (i16, i16* @g2, i64 1), i16* @g1), i1 icmp ne (i16* getelementptr inbounds (i16, i16* @g2, i64 1), i16* @g1)), i1 true)
+; CHECK-NEXT: [[C15:%.*]] = select i1 [[NOT_L]], i1 true, i1 xor (i1 and (i1 icmp eq (ptr getelementptr inbounds (i16, ptr @g2, i64 1), ptr @g1), i1 icmp ne (ptr getelementptr inbounds (i16, ptr @g2, i64 1), ptr @g1)), i1 true)
; CHECK-NEXT: ret i1 [[C15]]
;
%not.L = xor i1 %L, true
- %C15 = select i1 %not.L, i1 true, i1 xor (i1 and (i1 icmp eq (i16* getelementptr inbounds (i16, i16* @g2, i64 1), i16* @g1), i1 icmp ne (i16* getelementptr inbounds (i16, i16* @g2, i64 1), i16* @g1)), i1 true)
+ %C15 = select i1 %not.L, i1 true, i1 xor (i1 and (i1 icmp eq (ptr getelementptr inbounds (i16, ptr @g2, i64 1), ptr @g1), i1 icmp ne (ptr getelementptr inbounds (i16, ptr @g2, i64 1), ptr @g1)), i1 true)
ret i1 %C15
}
; loops.
define i32 @select_replace_constexpr(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @select_replace_constexpr(
-; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[X:%.*]], ptrtoint (i32* @g to i32)
+; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[X:%.*]], ptrtoint (ptr @g to i32)
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[X]], [[Y:%.*]]
; CHECK-NEXT: [[S:%.*]] = select i1 [[C]], i32 [[ADD]], i32 [[Z:%.*]]
; CHECK-NEXT: ret i32 [[S]]
;
- %c = icmp eq i32 %x, ptrtoint (i32* @g to i32)
+ %c = icmp eq i32 %x, ptrtoint (ptr @g to i32)
%add = add i32 %x, %y
%s = select i1 %c, i32 %add, i32 %z
ret i32 %s
ret <2 x i32> %res
}
-define i32 @test_multiuse_def(i32 %x, i32* %p) {
+define i32 @test_multiuse_def(i32 %x, ptr %p) {
; CHECK-LABEL: @test_multiuse_def(
; CHECK-NEXT: [[CT:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[X:%.*]], i1 false), !range [[RNG1]]
-; CHECK-NEXT: store i32 [[CT]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[CT]], ptr [[P:%.*]], align 4
; CHECK-NEXT: ret i32 [[CT]]
;
%ct = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
%tobool = icmp ne i32 %x, 0
%cond = select i1 %tobool, i32 %ct, i32 32
- store i32 %ct, i32* %p
+ store i32 %ct, ptr %p
ret i32 %cond
}
-define i32 @test_multiuse_undef(i32 %x, i32* %p) {
+define i32 @test_multiuse_undef(i32 %x, ptr %p) {
; CHECK-LABEL: @test_multiuse_undef(
; CHECK-NEXT: [[CT:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[X:%.*]], i1 false), !range [[RNG1]]
-; CHECK-NEXT: store i32 [[CT]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[CT]], ptr [[P:%.*]], align 4
; CHECK-NEXT: ret i32 [[CT]]
;
%ct = tail call i32 @llvm.ctlz.i32(i32 %x, i1 true)
%tobool = icmp ne i32 %x, 0
%cond = select i1 %tobool, i32 %ct, i32 32
- store i32 %ct, i32* %p
+ store i32 %ct, ptr %p
ret i32 %cond
}
-define i64 @test_multiuse_zext_def(i32 %x, i64* %p) {
+define i64 @test_multiuse_zext_def(i32 %x, ptr %p) {
; CHECK-LABEL: @test_multiuse_zext_def(
; CHECK-NEXT: [[CT:%.*]] = tail call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 false), !range [[RNG1]]
; CHECK-NEXT: [[CONV:%.*]] = zext i32 [[CT]] to i64
-; CHECK-NEXT: store i64 [[CONV]], i64* [[P:%.*]], align 4
+; CHECK-NEXT: store i64 [[CONV]], ptr [[P:%.*]], align 4
; CHECK-NEXT: ret i64 [[CONV]]
;
%ct = tail call i32 @llvm.cttz.i32(i32 %x, i1 false)
%conv = zext i32 %ct to i64
%tobool = icmp ne i32 %x, 0
%cond = select i1 %tobool, i64 %conv, i64 32
- store i64 %conv, i64* %p
+ store i64 %conv, ptr %p
ret i64 %cond
}
-define i64 @test_multiuse_zext_undef(i32 %x, i64* %p) {
+define i64 @test_multiuse_zext_undef(i32 %x, ptr %p) {
; CHECK-LABEL: @test_multiuse_zext_undef(
; CHECK-NEXT: [[CT:%.*]] = tail call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 false), !range [[RNG1]]
; CHECK-NEXT: [[CONV:%.*]] = zext i32 [[CT]] to i64
-; CHECK-NEXT: store i64 [[CONV]], i64* [[P:%.*]], align 4
+; CHECK-NEXT: store i64 [[CONV]], ptr [[P:%.*]], align 4
; CHECK-NEXT: ret i64 [[CONV]]
;
%ct = tail call i32 @llvm.cttz.i32(i32 %x, i1 true)
%conv = zext i32 %ct to i64
%tobool = icmp ne i32 %x, 0
%cond = select i1 %tobool, i64 %conv, i64 32
- store i64 %conv, i64* %p
+ store i64 %conv, ptr %p
ret i64 %cond
}
-define i16 @test_multiuse_trunc_def(i64 %x, i16 *%p) {
+define i16 @test_multiuse_trunc_def(i64 %x, ptr %p) {
; CHECK-LABEL: @test_multiuse_trunc_def(
; CHECK-NEXT: [[CT:%.*]] = tail call i64 @llvm.cttz.i64(i64 [[X:%.*]], i1 false), !range [[RNG2]]
; CHECK-NEXT: [[CONV:%.*]] = trunc i64 [[CT]] to i16
-; CHECK-NEXT: store i16 [[CONV]], i16* [[P:%.*]], align 2
+; CHECK-NEXT: store i16 [[CONV]], ptr [[P:%.*]], align 2
; CHECK-NEXT: ret i16 [[CONV]]
;
%ct = tail call i64 @llvm.cttz.i64(i64 %x, i1 false)
%conv = trunc i64 %ct to i16
%tobool = icmp ne i64 %x, 0
%cond = select i1 %tobool, i16 %conv, i16 64
- store i16 %conv, i16* %p
+ store i16 %conv, ptr %p
ret i16 %cond
}
-define i16 @test_multiuse_trunc_undef(i64 %x, i16 *%p) {
+define i16 @test_multiuse_trunc_undef(i64 %x, ptr %p) {
; CHECK-LABEL: @test_multiuse_trunc_undef(
; CHECK-NEXT: [[CT:%.*]] = tail call i64 @llvm.cttz.i64(i64 [[X:%.*]], i1 false), !range [[RNG2]]
; CHECK-NEXT: [[CONV:%.*]] = trunc i64 [[CT]] to i16
-; CHECK-NEXT: store i16 [[CONV]], i16* [[P:%.*]], align 2
+; CHECK-NEXT: store i16 [[CONV]], ptr [[P:%.*]], align 2
; CHECK-NEXT: ret i16 [[CONV]]
;
%ct = tail call i64 @llvm.cttz.i64(i64 %x, i1 true)
%conv = trunc i64 %ct to i16
%tobool = icmp ne i64 %x, 0
%cond = select i1 %tobool, i16 %conv, i16 64
- store i16 %conv, i16* %p
+ store i16 %conv, ptr %p
ret i16 %cond
}
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-define i64 @cmpxchg_0(i64* %ptr, i64 %compare, i64 %new_value) {
+define i64 @cmpxchg_0(ptr %ptr, i64 %compare, i64 %new_value) {
; CHECK-LABEL: @cmpxchg_0(
-; CHECK-NEXT: %tmp0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value seq_cst seq_cst
+; CHECK-NEXT: %tmp0 = cmpxchg ptr %ptr, i64 %compare, i64 %new_value seq_cst seq_cst
; CHECK-NEXT: %tmp2 = extractvalue { i64, i1 } %tmp0, 0
; CHECK-NEXT: ret i64 %tmp2
;
- %tmp0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value seq_cst seq_cst
+ %tmp0 = cmpxchg ptr %ptr, i64 %compare, i64 %new_value seq_cst seq_cst
%tmp1 = extractvalue { i64, i1 } %tmp0, 1
%tmp2 = extractvalue { i64, i1 } %tmp0, 0
%tmp3 = select i1 %tmp1, i64 %compare, i64 %tmp2
ret i64 %tmp3
}
-define i64 @cmpxchg_1(i64* %ptr, i64 %compare, i64 %new_value) {
+define i64 @cmpxchg_1(ptr %ptr, i64 %compare, i64 %new_value) {
; CHECK-LABEL: @cmpxchg_1(
-; CHECK-NEXT: %tmp0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value seq_cst seq_cst
+; CHECK-NEXT: %tmp0 = cmpxchg ptr %ptr, i64 %compare, i64 %new_value seq_cst seq_cst
; CHECK-NEXT: ret i64 %compare
;
- %tmp0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value seq_cst seq_cst
+ %tmp0 = cmpxchg ptr %ptr, i64 %compare, i64 %new_value seq_cst seq_cst
%tmp1 = extractvalue { i64, i1 } %tmp0, 1
%tmp2 = extractvalue { i64, i1 } %tmp0, 0
%tmp3 = select i1 %tmp1, i64 %tmp2, i64 %compare
ret i64 %tmp3
}
-define i64 @cmpxchg_2(i64* %ptr, i64 %compare, i64 %new_value) {
+define i64 @cmpxchg_2(ptr %ptr, i64 %compare, i64 %new_value) {
; CHECK-LABEL: @cmpxchg_2(
-; CHECK-NEXT: %tmp0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value acq_rel monotonic
+; CHECK-NEXT: %tmp0 = cmpxchg ptr %ptr, i64 %compare, i64 %new_value acq_rel monotonic
; CHECK-NEXT: ret i64 %compare
;
- %tmp0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value acq_rel monotonic
+ %tmp0 = cmpxchg ptr %ptr, i64 %compare, i64 %new_value acq_rel monotonic
%tmp1 = extractvalue { i64, i1 } %tmp0, 1
%tmp2 = extractvalue { i64, i1 } %tmp0, 0
%tmp3 = select i1 %tmp1, i64 %compare, i64 %tmp2
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-define i32* @test1a(i32* %p, i32* %q) {
+define ptr @test1a(ptr %p, ptr %q) {
; CHECK-LABEL: @test1a(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32* [[P:%.*]], [[Q:%.*]]
-; CHECK-NEXT: [[SELECT_V:%.*]] = select i1 [[CMP]], i32* [[P]], i32* [[Q]]
-; CHECK-NEXT: [[SELECT:%.*]] = getelementptr i32, i32* [[SELECT_V]], i64 4
-; CHECK-NEXT: ret i32* [[SELECT]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt ptr [[P:%.*]], [[Q:%.*]]
+; CHECK-NEXT: [[SELECT_V:%.*]] = select i1 [[CMP]], ptr [[P]], ptr [[Q]]
+; CHECK-NEXT: [[SELECT:%.*]] = getelementptr i32, ptr [[SELECT_V]], i64 4
+; CHECK-NEXT: ret ptr [[SELECT]]
;
- %gep1 = getelementptr i32, i32* %p, i64 4
- %gep2 = getelementptr i32, i32* %q, i64 4
- %cmp = icmp ugt i32* %p, %q
- %select = select i1 %cmp, i32* %gep1, i32* %gep2
- ret i32* %select
+ %gep1 = getelementptr i32, ptr %p, i64 4
+ %gep2 = getelementptr i32, ptr %q, i64 4
+ %cmp = icmp ugt ptr %p, %q
+ %select = select i1 %cmp, ptr %gep1, ptr %gep2
+ ret ptr %select
}
-define i32* @test1b(i32* %p, i32* %q) {
+define ptr @test1b(ptr %p, ptr %q) {
; CHECK-LABEL: @test1b(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32* [[P:%.*]], [[Q:%.*]]
-; CHECK-NEXT: [[SELECT_V:%.*]] = select i1 [[CMP]], i32* [[P]], i32* [[Q]]
-; CHECK-NEXT: [[SELECT:%.*]] = getelementptr i32, i32* [[SELECT_V]], i64 4
-; CHECK-NEXT: ret i32* [[SELECT]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt ptr [[P:%.*]], [[Q:%.*]]
+; CHECK-NEXT: [[SELECT_V:%.*]] = select i1 [[CMP]], ptr [[P]], ptr [[Q]]
+; CHECK-NEXT: [[SELECT:%.*]] = getelementptr i32, ptr [[SELECT_V]], i64 4
+; CHECK-NEXT: ret ptr [[SELECT]]
;
- %gep1 = getelementptr inbounds i32, i32* %p, i64 4
- %gep2 = getelementptr i32, i32* %q, i64 4
- %cmp = icmp ugt i32* %p, %q
- %select = select i1 %cmp, i32* %gep1, i32* %gep2
- ret i32* %select
+ %gep1 = getelementptr inbounds i32, ptr %p, i64 4
+ %gep2 = getelementptr i32, ptr %q, i64 4
+ %cmp = icmp ugt ptr %p, %q
+ %select = select i1 %cmp, ptr %gep1, ptr %gep2
+ ret ptr %select
}
-define i32* @test1c(i32* %p, i32* %q) {
+define ptr @test1c(ptr %p, ptr %q) {
; CHECK-LABEL: @test1c(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32* [[P:%.*]], [[Q:%.*]]
-; CHECK-NEXT: [[SELECT_V:%.*]] = select i1 [[CMP]], i32* [[P]], i32* [[Q]]
-; CHECK-NEXT: [[SELECT:%.*]] = getelementptr i32, i32* [[SELECT_V]], i64 4
-; CHECK-NEXT: ret i32* [[SELECT]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt ptr [[P:%.*]], [[Q:%.*]]
+; CHECK-NEXT: [[SELECT_V:%.*]] = select i1 [[CMP]], ptr [[P]], ptr [[Q]]
+; CHECK-NEXT: [[SELECT:%.*]] = getelementptr i32, ptr [[SELECT_V]], i64 4
+; CHECK-NEXT: ret ptr [[SELECT]]
;
- %gep1 = getelementptr i32, i32* %p, i64 4
- %gep2 = getelementptr inbounds i32, i32* %q, i64 4
- %cmp = icmp ugt i32* %p, %q
- %select = select i1 %cmp, i32* %gep1, i32* %gep2
- ret i32* %select
+ %gep1 = getelementptr i32, ptr %p, i64 4
+ %gep2 = getelementptr inbounds i32, ptr %q, i64 4
+ %cmp = icmp ugt ptr %p, %q
+ %select = select i1 %cmp, ptr %gep1, ptr %gep2
+ ret ptr %select
}
-define i32* @test1d(i32* %p, i32* %q) {
+define ptr @test1d(ptr %p, ptr %q) {
; CHECK-LABEL: @test1d(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32* [[P:%.*]], [[Q:%.*]]
-; CHECK-NEXT: [[SELECT_V:%.*]] = select i1 [[CMP]], i32* [[P]], i32* [[Q]]
-; CHECK-NEXT: [[SELECT:%.*]] = getelementptr inbounds i32, i32* [[SELECT_V]], i64 4
-; CHECK-NEXT: ret i32* [[SELECT]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt ptr [[P:%.*]], [[Q:%.*]]
+; CHECK-NEXT: [[SELECT_V:%.*]] = select i1 [[CMP]], ptr [[P]], ptr [[Q]]
+; CHECK-NEXT: [[SELECT:%.*]] = getelementptr inbounds i32, ptr [[SELECT_V]], i64 4
+; CHECK-NEXT: ret ptr [[SELECT]]
;
- %gep1 = getelementptr inbounds i32, i32* %p, i64 4
- %gep2 = getelementptr inbounds i32, i32* %q, i64 4
- %cmp = icmp ugt i32* %p, %q
- %select = select i1 %cmp, i32* %gep1, i32* %gep2
- ret i32* %select
+ %gep1 = getelementptr inbounds i32, ptr %p, i64 4
+ %gep2 = getelementptr inbounds i32, ptr %q, i64 4
+ %cmp = icmp ugt ptr %p, %q
+ %select = select i1 %cmp, ptr %gep1, ptr %gep2
+ ret ptr %select
}
-define i32* @test2(i32* %p, i64 %x, i64 %y) {
+define ptr @test2(ptr %p, i64 %x, i64 %y) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.umax.i64(i64 [[X:%.*]], i64 [[Y:%.*]])
-; CHECK-NEXT: [[SELECT:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 [[TMP1]]
-; CHECK-NEXT: ret i32* [[SELECT]]
+; CHECK-NEXT: [[SELECT:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[SELECT]]
;
- %gep1 = getelementptr inbounds i32, i32* %p, i64 %x
- %gep2 = getelementptr inbounds i32, i32* %p, i64 %y
+ %gep1 = getelementptr inbounds i32, ptr %p, i64 %x
+ %gep2 = getelementptr inbounds i32, ptr %p, i64 %y
%cmp = icmp ugt i64 %x, %y
- %select = select i1 %cmp, i32* %gep1, i32* %gep2
- ret i32* %select
+ %select = select i1 %cmp, ptr %gep1, ptr %gep2
+ ret ptr %select
}
; PR50183
-define i32* @test2a(i32* %p, i64 %x, i64 %y) {
+define ptr @test2a(ptr %p, i64 %x, i64 %y) {
; CHECK-LABEL: @test2a(
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[SELECT_IDX:%.*]] = select i1 [[CMP]], i64 [[X]], i64 0
-; CHECK-NEXT: [[SELECT:%.*]] = getelementptr i32, i32* [[P:%.*]], i64 [[SELECT_IDX]]
-; CHECK-NEXT: ret i32* [[SELECT]]
+; CHECK-NEXT: [[SELECT:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[SELECT_IDX]]
+; CHECK-NEXT: ret ptr [[SELECT]]
;
- %gep = getelementptr inbounds i32, i32* %p, i64 %x
+ %gep = getelementptr inbounds i32, ptr %p, i64 %x
%cmp = icmp ugt i64 %x, %y
- %select = select i1 %cmp, i32* %gep, i32* %p
- ret i32* %select
+ %select = select i1 %cmp, ptr %gep, ptr %p
+ ret ptr %select
}
; PR50183
-define i32* @test2b(i32* %p, i64 %x, i64 %y) {
+define ptr @test2b(ptr %p, i64 %x, i64 %y) {
; CHECK-LABEL: @test2b(
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[SELECT_IDX:%.*]] = select i1 [[CMP]], i64 0, i64 [[X]]
-; CHECK-NEXT: [[SELECT:%.*]] = getelementptr i32, i32* [[P:%.*]], i64 [[SELECT_IDX]]
-; CHECK-NEXT: ret i32* [[SELECT]]
+; CHECK-NEXT: [[SELECT:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[SELECT_IDX]]
+; CHECK-NEXT: ret ptr [[SELECT]]
;
- %gep = getelementptr inbounds i32, i32* %p, i64 %x
+ %gep = getelementptr inbounds i32, ptr %p, i64 %x
%cmp = icmp ugt i64 %x, %y
- %select = select i1 %cmp, i32* %p, i32* %gep
- ret i32* %select
+ %select = select i1 %cmp, ptr %p, ptr %gep
+ ret ptr %select
}
; PR51069
-define i32* @test2c(i32* %p, i64 %x, i64 %y) {
+define ptr @test2c(ptr %p, i64 %x, i64 %y) {
; CHECK-LABEL: @test2c(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 [[X:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[X:%.*]]
; CHECK-NEXT: [[ICMP:%.*]] = icmp ugt i64 [[X]], [[Y:%.*]]
; CHECK-NEXT: [[SEL_IDX:%.*]] = select i1 [[ICMP]], i64 0, i64 6
-; CHECK-NEXT: [[SEL:%.*]] = getelementptr i32, i32* [[GEP1]], i64 [[SEL_IDX]]
-; CHECK-NEXT: ret i32* [[SEL]]
+; CHECK-NEXT: [[SEL:%.*]] = getelementptr i32, ptr [[GEP1]], i64 [[SEL_IDX]]
+; CHECK-NEXT: ret ptr [[SEL]]
;
- %gep1 = getelementptr inbounds i32, i32* %p, i64 %x
- %gep2 = getelementptr inbounds i32, i32* %gep1, i64 6
+ %gep1 = getelementptr inbounds i32, ptr %p, i64 %x
+ %gep2 = getelementptr inbounds i32, ptr %gep1, i64 6
%icmp = icmp ugt i64 %x, %y
- %sel = select i1 %icmp, i32* %gep1, i32* %gep2
- ret i32* %sel
+ %sel = select i1 %icmp, ptr %gep1, ptr %gep2
+ ret ptr %sel
}
; PR51069
-define i32* @test2d(i32* %p, i64 %x, i64 %y) {
+define ptr @test2d(ptr %p, i64 %x, i64 %y) {
; CHECK-LABEL: @test2d(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 [[X:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[X:%.*]]
; CHECK-NEXT: [[ICMP:%.*]] = icmp ugt i64 [[X]], [[Y:%.*]]
; CHECK-NEXT: [[SEL_IDX:%.*]] = select i1 [[ICMP]], i64 6, i64 0
-; CHECK-NEXT: [[SEL:%.*]] = getelementptr i32, i32* [[GEP1]], i64 [[SEL_IDX]]
-; CHECK-NEXT: ret i32* [[SEL]]
+; CHECK-NEXT: [[SEL:%.*]] = getelementptr i32, ptr [[GEP1]], i64 [[SEL_IDX]]
+; CHECK-NEXT: ret ptr [[SEL]]
;
- %gep1 = getelementptr inbounds i32, i32* %p, i64 %x
- %gep2 = getelementptr inbounds i32, i32* %gep1, i64 6
+ %gep1 = getelementptr inbounds i32, ptr %p, i64 %x
+ %gep2 = getelementptr inbounds i32, ptr %gep1, i64 6
%icmp = icmp ugt i64 %x, %y
- %sel = select i1 %icmp, i32* %gep2, i32* %gep1
- ret i32* %sel
+ %sel = select i1 %icmp, ptr %gep2, ptr %gep1
+ ret ptr %sel
}
; GEPs with three (or more) operands are currently not expected to be
; optimised, though in principle they could be.
-define i32* @test3a([4 x i32]* %p, i64 %x, i64 %y) {
+define ptr @test3a(ptr %p, i64 %x, i64 %y) {
; CHECK-LABEL: @test3a(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[P:%.*]], i64 2, i64 [[X:%.*]]
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[P]], i64 2, i64 [[Y:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [4 x i32], ptr [[P:%.*]], i64 2, i64 [[X:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [4 x i32], ptr [[P]], i64 2, i64 [[Y:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[X]], [[Y]]
-; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32* [[GEP1]], i32* [[GEP2]]
-; CHECK-NEXT: ret i32* [[SELECT]]
+; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], ptr [[GEP1]], ptr [[GEP2]]
+; CHECK-NEXT: ret ptr [[SELECT]]
;
- %gep1 = getelementptr inbounds [4 x i32], [4 x i32]* %p, i64 2, i64 %x
- %gep2 = getelementptr inbounds [4 x i32], [4 x i32]* %p, i64 2, i64 %y
+ %gep1 = getelementptr inbounds [4 x i32], ptr %p, i64 2, i64 %x
+ %gep2 = getelementptr inbounds [4 x i32], ptr %p, i64 2, i64 %y
%cmp = icmp ugt i64 %x, %y
- %select = select i1 %cmp, i32* %gep1, i32* %gep2
- ret i32* %select
+ %select = select i1 %cmp, ptr %gep1, ptr %gep2
+ ret ptr %select
}
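; For illustration only (hypothetical function, not in the test file): the
; in-principle fold for @test3a would select between the differing trailing
; indices and emit a single three-operand GEP.
define ptr @test3a_folded_sketch(ptr %p, i64 %x, i64 %y) {
  %cmp = icmp ugt i64 %x, %y
  %idx = select i1 %cmp, i64 %x, i64 %y   ; i.e. umax(%x, %y), as in @test2
  %gep = getelementptr inbounds [4 x i32], ptr %p, i64 2, i64 %idx
  ret ptr %gep
}
; The select-of-GEPs fold currently only recognises the two-operand form, so
; @test3a keeps both GEPs and the pointer select.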
-define i32* @test3b([4 x i32]* %p, [4 x i32]* %q, i64 %x, i64 %y) {
+define ptr @test3b(ptr %p, ptr %q, i64 %x, i64 %y) {
; CHECK-LABEL: @test3b(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[P:%.*]], i64 2, i64 [[X:%.*]]
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[Q:%.*]], i64 2, i64 [[X]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [4 x i32], ptr [[P:%.*]], i64 2, i64 [[X:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [4 x i32], ptr [[Q:%.*]], i64 2, i64 [[X]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[X]], [[Y:%.*]]
-; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32* [[GEP1]], i32* [[GEP2]]
-; CHECK-NEXT: ret i32* [[SELECT]]
+; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], ptr [[GEP1]], ptr [[GEP2]]
+; CHECK-NEXT: ret ptr [[SELECT]]
;
- %gep1 = getelementptr inbounds [4 x i32], [4 x i32]* %p, i64 2, i64 %x
- %gep2 = getelementptr inbounds [4 x i32], [4 x i32]* %q, i64 2, i64 %x
+ %gep1 = getelementptr inbounds [4 x i32], ptr %p, i64 2, i64 %x
+ %gep2 = getelementptr inbounds [4 x i32], ptr %q, i64 2, i64 %x
%cmp = icmp ugt i64 %x, %y
- %select = select i1 %cmp, i32* %gep1, i32* %gep2
- ret i32* %select
+ %select = select i1 %cmp, ptr %gep1, ptr %gep2
+ ret ptr %select
}
-define i32* @test3c([4 x i32]* %p, i32* %q, i64 %x, i64 %y) {
+define ptr @test3c(ptr %p, ptr %q, i64 %x, i64 %y) {
; CHECK-LABEL: @test3c(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[P:%.*]], i64 [[X:%.*]], i64 2
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, i32* [[Q:%.*]], i64 [[X]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [4 x i32], ptr [[P:%.*]], i64 [[X:%.*]], i64 2
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[Q:%.*]], i64 [[X]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[X]], [[Y:%.*]]
-; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32* [[GEP1]], i32* [[GEP2]]
-; CHECK-NEXT: ret i32* [[SELECT]]
+; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], ptr [[GEP1]], ptr [[GEP2]]
+; CHECK-NEXT: ret ptr [[SELECT]]
;
- %gep1 = getelementptr inbounds [4 x i32], [4 x i32]* %p, i64 %x, i64 2
- %gep2 = getelementptr inbounds i32, i32* %q, i64 %x
+ %gep1 = getelementptr inbounds [4 x i32], ptr %p, i64 %x, i64 2
+ %gep2 = getelementptr inbounds i32, ptr %q, i64 %x
%cmp = icmp ugt i64 %x, %y
- %select = select i1 %cmp, i32* %gep1, i32* %gep2
- ret i32* %select
+ %select = select i1 %cmp, ptr %gep1, ptr %gep2
+ ret ptr %select
}
-define i32* @test3d(i32* %p, [4 x i32]* %q, i64 %x, i64 %y) {
+define ptr @test3d(ptr %p, ptr %q, i64 %x, i64 %y) {
; CHECK-LABEL: @test3d(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 [[X:%.*]]
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[Q:%.*]], i64 [[X]], i64 2
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[X:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [4 x i32], ptr [[Q:%.*]], i64 [[X]], i64 2
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[X]], [[Y:%.*]]
-; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32* [[GEP1]], i32* [[GEP2]]
-; CHECK-NEXT: ret i32* [[SELECT]]
+; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], ptr [[GEP1]], ptr [[GEP2]]
+; CHECK-NEXT: ret ptr [[SELECT]]
;
- %gep1 = getelementptr inbounds i32, i32* %p, i64 %x
- %gep2 = getelementptr inbounds [4 x i32], [4 x i32]* %q, i64 %x, i64 2
+ %gep1 = getelementptr inbounds i32, ptr %p, i64 %x
+ %gep2 = getelementptr inbounds [4 x i32], ptr %q, i64 %x, i64 2
%cmp = icmp ugt i64 %x, %y
- %select = select i1 %cmp, i32* %gep1, i32* %gep2
- ret i32* %select
+ %select = select i1 %cmp, ptr %gep1, ptr %gep2
+ ret ptr %select
}
; Shouldn't be optimised as it would mean introducing an extra select
-define i32* @test4(i32* %p, i32* %q, i64 %x, i64 %y) {
+define ptr @test4(ptr %p, ptr %q, i64 %x, i64 %y) {
; CHECK-LABEL: @test4(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 [[X:%.*]]
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, i32* [[Q:%.*]], i64 [[Y:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[X:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[Q:%.*]], i64 [[Y:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[X]], [[Y]]
-; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32* [[GEP1]], i32* [[GEP2]]
-; CHECK-NEXT: ret i32* [[SELECT]]
+; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], ptr [[GEP1]], ptr [[GEP2]]
+; CHECK-NEXT: ret ptr [[SELECT]]
;
- %gep1 = getelementptr inbounds i32, i32* %p, i64 %x
- %gep2 = getelementptr inbounds i32, i32* %q, i64 %y
+ %gep1 = getelementptr inbounds i32, ptr %p, i64 %x
+ %gep2 = getelementptr inbounds i32, ptr %q, i64 %y
%cmp = icmp ugt i64 %x, %y
- %select = select i1 %cmp, i32* %gep1, i32* %gep2
- ret i32* %select
+ %select = select i1 %cmp, ptr %gep1, ptr %gep2
+ ret ptr %select
}
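; A sketch of why @test4 is left alone (hypothetical function, not in the test
; file): with distinct base pointers and distinct indices, sinking the select
; through the GEP would need two selects where the input had one.
define ptr @test4_folded_sketch(ptr %p, ptr %q, i64 %x, i64 %y) {
  %cmp = icmp ugt i64 %x, %y
  %base = select i1 %cmp, ptr %p, ptr %q   ; one select for the base...
  %idx = select i1 %cmp, i64 %x, i64 %y    ; ...and another for the index
  %gep = getelementptr inbounds i32, ptr %base, i64 %idx
  ret ptr %gep
}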
; We cannot create a select with a vector condition but scalar operands.
-define <2 x i64*> @test5(i64* %p1, i64* %p2, <2 x i64> %idx, <2 x i1> %cc) {
+define <2 x ptr> @test5(ptr %p1, ptr %p2, <2 x i64> %idx, <2 x i1> %cc) {
; CHECK-LABEL: @test5(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i64, i64* [[P1:%.*]], <2 x i64> [[IDX:%.*]]
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i64, i64* [[P2:%.*]], <2 x i64> [[IDX]]
-; CHECK-NEXT: [[SELECT:%.*]] = select <2 x i1> [[CC:%.*]], <2 x i64*> [[GEP1]], <2 x i64*> [[GEP2]]
-; CHECK-NEXT: ret <2 x i64*> [[SELECT]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i64, ptr [[P1:%.*]], <2 x i64> [[IDX:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i64, ptr [[P2:%.*]], <2 x i64> [[IDX]]
+; CHECK-NEXT: [[SELECT:%.*]] = select <2 x i1> [[CC:%.*]], <2 x ptr> [[GEP1]], <2 x ptr> [[GEP2]]
+; CHECK-NEXT: ret <2 x ptr> [[SELECT]]
;
- %gep1 = getelementptr i64, i64* %p1, <2 x i64> %idx
- %gep2 = getelementptr i64, i64* %p2, <2 x i64> %idx
- %select = select <2 x i1> %cc, <2 x i64*> %gep1, <2 x i64*> %gep2
- ret <2 x i64*> %select
+ %gep1 = getelementptr i64, ptr %p1, <2 x i64> %idx
+ %gep2 = getelementptr i64, ptr %p2, <2 x i64> %idx
+ %select = select <2 x i1> %cc, <2 x ptr> %gep1, <2 x ptr> %gep2
+ ret <2 x ptr> %select
}
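; The blocked rewrite here would select between the scalar bases under the
; vector condition, which is invalid IR (a vector select requires vector
; operands), e.g. the rejected form would be:
;   %base = select <2 x i1> %cc, ptr %p1, ptr %p2 ; invalid: scalar operands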
; PR51069 - multiple uses
-define i32* @test6(i32* %p, i64 %x, i64 %y) {
+define ptr @test6(ptr %p, i64 %x, i64 %y) {
; CHECK-LABEL: @test6(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 [[X:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[X:%.*]]
; CHECK-NEXT: [[ICMP:%.*]] = icmp ugt i64 [[X]], [[Y:%.*]]
; CHECK-NEXT: [[SEL_IDX:%.*]] = select i1 [[ICMP]], i64 [[Y]], i64 0
-; CHECK-NEXT: [[SEL:%.*]] = getelementptr i32, i32* [[GEP1]], i64 [[SEL_IDX]]
-; CHECK-NEXT: call void @use_i32p(i32* [[GEP1]])
-; CHECK-NEXT: ret i32* [[SEL]]
+; CHECK-NEXT: [[SEL:%.*]] = getelementptr i32, ptr [[GEP1]], i64 [[SEL_IDX]]
+; CHECK-NEXT: call void @use_i32p(ptr [[GEP1]])
+; CHECK-NEXT: ret ptr [[SEL]]
;
- %gep1 = getelementptr inbounds i32, i32* %p, i64 %x
- %gep2 = getelementptr inbounds i32, i32* %gep1, i64 %y
+ %gep1 = getelementptr inbounds i32, ptr %p, i64 %x
+ %gep2 = getelementptr inbounds i32, ptr %gep1, i64 %y
%icmp = icmp ugt i64 %x, %y
- %sel = select i1 %icmp, i32* %gep2, i32* %gep1
- call void @use_i32p(i32* %gep1)
- ret i32* %sel
+ %sel = select i1 %icmp, ptr %gep2, ptr %gep1
+ call void @use_i32p(ptr %gep1)
+ ret ptr %sel
}
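; Since %gep1 is the common base of both select arms and already has an extra
; use, the select is rewritten over the index instead of the pointer, as the
; CHECK lines show: select(%gep1 + %y, %gep1) becomes %gep1 + select(%y, 0).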
-declare void @use_i32p(i32*)
+declare void @use_i32p(ptr)
; We cannot create a select-with-idx with a vector condition but scalar idx.
-define <2 x i64*> @test7(<2 x i64*> %p1, i64 %idx, <2 x i1> %cc) {
+define <2 x ptr> @test7(<2 x ptr> %p1, i64 %idx, <2 x i1> %cc) {
; CHECK-LABEL: @test7(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, <2 x i64*> [[P1:%.*]], i64 [[IDX:%.*]]
-; CHECK-NEXT: [[SELECT:%.*]] = select <2 x i1> [[CC:%.*]], <2 x i64*> [[P1]], <2 x i64*> [[GEP]]
-; CHECK-NEXT: ret <2 x i64*> [[SELECT]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, <2 x ptr> [[P1:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = select <2 x i1> [[CC:%.*]], <2 x ptr> [[P1]], <2 x ptr> [[GEP]]
+; CHECK-NEXT: ret <2 x ptr> [[SELECT]]
;
- %gep = getelementptr i64, <2 x i64*> %p1, i64 %idx
- %select = select <2 x i1> %cc, <2 x i64*> %p1, <2 x i64*> %gep
- ret <2 x i64*> %select
+ %gep = getelementptr i64, <2 x ptr> %p1, i64 %idx
+ %select = select <2 x i1> %cc, <2 x ptr> %p1, <2 x ptr> %gep
+ ret <2 x ptr> %select
}
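; Rewriting @test7 over the index would likewise need a select between %idx
; and 0 under the vector condition, which is invalid IR:
;   %sel.idx = select <2 x i1> %cc, i64 0, i64 %idx ; invalid: scalar operands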
declare void @test2()
-define i32 @test(i1 %cond, i32 *%P) {
+define i32 @test(i1 %cond, ptr %P) {
%A = alloca i32
- store i32 1, i32* %P
- store i32 1, i32* %A
+ store i32 1, ptr %P
+ store i32 1, ptr %A
call void @test2() readonly
- %P2 = select i1 %cond, i32 *%P, i32* %A
- %V = load i32, i32* %P2
+ %P2 = select i1 %cond, ptr %P, ptr %A
+ %V = load i32, ptr %P2
ret i32 %V
}
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
; Fold zeroing of inactive lanes into the load's passthrough parameter.
-define <4 x float> @masked_load_and_zero_inactive_1(<4 x float>* %ptr, <4 x i1> %mask) {
+define <4 x float> @masked_load_and_zero_inactive_1(ptr %ptr, <4 x i1> %mask) {
; CHECK-LABEL: @masked_load_and_zero_inactive_1(
-; CHECK: %load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %ptr, i32 4, <4 x i1> %mask, <4 x float> zeroinitializer)
+; CHECK: %load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %ptr, i32 4, <4 x i1> %mask, <4 x float> zeroinitializer)
; CHECK-NEXT: ret <4 x float> %load
- %load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %ptr, i32 4, <4 x i1> %mask, <4 x float> undef)
+ %load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %ptr, i32 4, <4 x i1> %mask, <4 x float> undef)
%masked = select <4 x i1> %mask, <4 x float> %load, <4 x float> zeroinitializer
ret <4 x float> %masked
}
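; The identity behind this fold (also used by the inverted-mask variants
; below) is:
;   select(mask, masked.load(ptr, mask, passthrough), zero)
;     == masked.load(ptr, mask, zero)
; since the lanes where the select picks zero are exactly the load's inactive
; lanes, whose values come from the passthrough operand.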
; As above but reuse the load's existing passthrough.
-define <4 x i32> @masked_load_and_zero_inactive_2(<4 x i32>* %ptr, <4 x i1> %mask) {
+define <4 x i32> @masked_load_and_zero_inactive_2(ptr %ptr, <4 x i1> %mask) {
; CHECK-LABEL: @masked_load_and_zero_inactive_2(
-; CHECK: %load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptr, i32 4, <4 x i1> %mask, <4 x i32> zeroinitializer)
+; CHECK: %load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %ptr, i32 4, <4 x i1> %mask, <4 x i32> zeroinitializer)
; CHECK-NEXT: ret <4 x i32> %load
- %load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptr, i32 4, <4 x i1> %mask, <4 x i32> zeroinitializer)
+ %load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %ptr, i32 4, <4 x i1> %mask, <4 x i32> zeroinitializer)
%masked = select <4 x i1> %mask, <4 x i32> %load, <4 x i32> zeroinitializer
ret <4 x i32> %masked
}
; No transform when the load's passthrough cannot be reused or altered.
-define <4 x i32> @masked_load_and_zero_inactive_3(<4 x i32>* %ptr, <4 x i1> %mask, <4 x i32> %passthrough) {
+define <4 x i32> @masked_load_and_zero_inactive_3(ptr %ptr, <4 x i1> %mask, <4 x i32> %passthrough) {
; CHECK-LABEL: @masked_load_and_zero_inactive_3(
-; CHECK: %load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptr, i32 4, <4 x i1> %mask, <4 x i32> %passthrough)
+; CHECK: %load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %ptr, i32 4, <4 x i1> %mask, <4 x i32> %passthrough)
; CHECK-NEXT: %masked = select <4 x i1> %mask, <4 x i32> %load, <4 x i32> zeroinitializer
; CHECK-NEXT: ret <4 x i32> %masked
- %load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptr, i32 4, <4 x i1> %mask, <4 x i32> %passthrough)
+ %load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %ptr, i32 4, <4 x i1> %mask, <4 x i32> %passthrough)
%masked = select <4 x i1> %mask, <4 x i32> %load, <4 x i32> zeroinitializer
ret <4 x i32> %masked
}
; Remove redundant select when its mask doesn't overlap with the load mask.
-define <4 x i32> @masked_load_and_zero_inactive_4(<4 x i32>* %ptr, <4 x i1> %inv_mask) {
+define <4 x i32> @masked_load_and_zero_inactive_4(ptr %ptr, <4 x i1> %inv_mask) {
; CHECK-LABEL: @masked_load_and_zero_inactive_4(
; CHECK: %mask = xor <4 x i1> %inv_mask, <i1 true, i1 true, i1 true, i1 true>
-; CHECK-NEXT: %load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptr, i32 4, <4 x i1> %mask, <4 x i32> zeroinitializer)
+; CHECK-NEXT: %load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %ptr, i32 4, <4 x i1> %mask, <4 x i32> zeroinitializer)
; CHECK-NEXT: ret <4 x i32> %load
%mask = xor <4 x i1> %inv_mask, <i1 true, i1 true, i1 true, i1 true>
- %load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptr, i32 4, <4 x i1> %mask, <4 x i32> undef)
+ %load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %ptr, i32 4, <4 x i1> %mask, <4 x i32> undef)
%masked = select <4 x i1> %inv_mask, <4 x i32> zeroinitializer, <4 x i32> %load
ret <4 x i32> %masked
}
; As above but reuse the load's existing passthrough.
-define <4 x i32> @masked_load_and_zero_inactive_5(<4 x i32>* %ptr, <4 x i1> %inv_mask) {
+define <4 x i32> @masked_load_and_zero_inactive_5(ptr %ptr, <4 x i1> %inv_mask) {
; CHECK-LABEL: @masked_load_and_zero_inactive_5(
; CHECK: %mask = xor <4 x i1> %inv_mask, <i1 true, i1 true, i1 true, i1 true>
-; CHECK-NEXT: %load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptr, i32 4, <4 x i1> %mask, <4 x i32> zeroinitializer)
+; CHECK-NEXT: %load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %ptr, i32 4, <4 x i1> %mask, <4 x i32> zeroinitializer)
; CHECK-NEXT: ret <4 x i32> %load
%mask = xor <4 x i1> %inv_mask, <i1 true, i1 true, i1 true, i1 true>
- %load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptr, i32 4, <4 x i1> %mask, <4 x i32> zeroinitializer)
+ %load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %ptr, i32 4, <4 x i1> %mask, <4 x i32> zeroinitializer)
%masked = select <4 x i1> %inv_mask, <4 x i32> zeroinitializer, <4 x i32> %load
ret <4 x i32> %masked
}
; No transform when the load's passthrough cannot be reused or altered.
-define <4 x i32> @masked_load_and_zero_inactive_6(<4 x i32>* %ptr, <4 x i1> %inv_mask, <4 x i32> %passthrough) {
+define <4 x i32> @masked_load_and_zero_inactive_6(ptr %ptr, <4 x i1> %inv_mask, <4 x i32> %passthrough) {
; CHECK-LABEL: @masked_load_and_zero_inactive_6(
; CHECK: %mask = xor <4 x i1> %inv_mask, <i1 true, i1 true, i1 true, i1 true>
-; CHECK-NEXT: %load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptr, i32 4, <4 x i1> %mask, <4 x i32> %passthrough)
+; CHECK-NEXT: %load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %ptr, i32 4, <4 x i1> %mask, <4 x i32> %passthrough)
; CHECK-NEXT: %masked = select <4 x i1> %inv_mask, <4 x i32> zeroinitializer, <4 x i32> %load
; CHECK-NEXT: ret <4 x i32> %masked
%mask = xor <4 x i1> %inv_mask, <i1 true, i1 true, i1 true, i1 true>
- %load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptr, i32 4, <4 x i1> %mask, <4 x i32> %passthrough)
+ %load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %ptr, i32 4, <4 x i1> %mask, <4 x i32> %passthrough)
%masked = select <4 x i1> %inv_mask, <4 x i32> zeroinitializer, <4 x i32> %load
ret <4 x i32> %masked
}
; No transform when select and load masks have no relation.
-define <4 x i32> @masked_load_and_zero_inactive_7(<4 x i32>* %ptr, <4 x i1> %mask1, <4 x i1> %mask2) {
+define <4 x i32> @masked_load_and_zero_inactive_7(ptr %ptr, <4 x i1> %mask1, <4 x i1> %mask2) {
; CHECK-LABEL: @masked_load_and_zero_inactive_7(
-; CHECK: %load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptr, i32 4, <4 x i1> %mask1, <4 x i32> zeroinitializer)
+; CHECK: %load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %ptr, i32 4, <4 x i1> %mask1, <4 x i32> zeroinitializer)
; CHECK-NEXT: %masked = select <4 x i1> %mask2, <4 x i32> zeroinitializer, <4 x i32> %load
; CHECK-NEXT: ret <4 x i32> %masked
- %load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptr, i32 4, <4 x i1> %mask1, <4 x i32> zeroinitializer)
+ %load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %ptr, i32 4, <4 x i1> %mask1, <4 x i32> zeroinitializer)
%masked = select <4 x i1> %mask2, <4 x i32> zeroinitializer, <4 x i32> %load
ret <4 x i32> %masked
}
; A more complex case where we can prove the select mask is a subset of the
; load's inactive lanes and thus the load's passthrough takes effect.
-define <4 x float> @masked_load_and_zero_inactive_8(<4 x float>* %ptr, <4 x i1> %inv_mask, <4 x i1> %cond) {
+define <4 x float> @masked_load_and_zero_inactive_8(ptr %ptr, <4 x i1> %inv_mask, <4 x i1> %cond) {
; CHECK-LABEL: @masked_load_and_zero_inactive_8(
; CHECK: %mask = xor <4 x i1> %inv_mask, <i1 true, i1 true, i1 true, i1 true>
; CHECK-NEXT: %pg = and <4 x i1> %mask, %cond
-; CHECK-NEXT: %load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %ptr, i32 4, <4 x i1> %pg, <4 x float> zeroinitializer)
+; CHECK-NEXT: %load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %ptr, i32 4, <4 x i1> %pg, <4 x float> zeroinitializer)
; CHECK-NEXT: ret <4 x float> %load
%mask = xor <4 x i1> %inv_mask, <i1 true, i1 true, i1 true, i1 true>
%pg = and <4 x i1> %mask, %cond
- %load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %ptr, i32 4, <4 x i1> %pg, <4 x float> undef)
+ %load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %ptr, i32 4, <4 x i1> %pg, <4 x float> undef)
%masked = select <4 x i1> %inv_mask, <4 x float> zeroinitializer, <4 x float> %load
ret <4 x float> %masked
}
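; Here %inv_mask = ~%mask and the load mask is %pg = %mask & %cond, so every
; lane selected by %inv_mask is inactive in the load; zeroing those lanes
; therefore folds into the zeroinitializer passthrough, as checked above.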
-define <8 x float> @masked_load_and_scalar_select_cond(<8 x float>* %ptr, <8 x i1> %mask, i1 %cond) {
+define <8 x float> @masked_load_and_scalar_select_cond(ptr %ptr, <8 x i1> %mask, i1 %cond) {
; CHECK-LABEL: @masked_load_and_scalar_select_cond(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0v8f32(<8 x float>* [[PTR:%.*]], i32 32, <8 x i1> [[MASK:%.*]], <8 x float> undef)
+; CHECK-NEXT: [[TMP0:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0(ptr [[PTR:%.*]], i32 32, <8 x i1> [[MASK:%.*]], <8 x float> undef)
; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[COND:%.*]], <8 x float> zeroinitializer, <8 x float> [[TMP0]]
; CHECK-NEXT: ret <8 x float> [[TMP1]]
entry:
- %0 = call <8 x float> @llvm.masked.load.v8f32.p0v8f32(<8 x float>* %ptr, i32 32, <8 x i1> %mask, <8 x float> undef)
+ %0 = call <8 x float> @llvm.masked.load.v8f32.p0(ptr %ptr, i32 32, <8 x i1> %mask, <8 x float> undef)
%1 = select i1 %cond, <8 x float> zeroinitializer, <8 x float> %0
ret <8 x float> %1
}
-declare <8 x float> @llvm.masked.load.v8f32.p0v8f32(<8 x float>*, i32 immarg, <8 x i1>, <8 x float>)
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
-declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32 immarg, <4 x i1>, <4 x float>)
+declare <8 x float> @llvm.masked.load.v8f32.p0(ptr, i32 immarg, <8 x i1>, <8 x float>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
+declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>)
ret <2 x i8> %sel
}
-define i5 @umin_umin_common_op_10(i1 %cond, i5 %x, i5 %y, i5 %z, i5* %p) {
+define i5 @umin_umin_common_op_10(i1 %cond, i5 %x, i5 %y, i5 %z, ptr %p) {
; CHECK-LABEL: @umin_umin_common_op_10(
; CHECK-NEXT: [[M1:%.*]] = call i5 @llvm.umin.i5(i5 [[X:%.*]], i5 [[Z:%.*]])
-; CHECK-NEXT: store i5 [[M1]], i5* [[P:%.*]], align 1
+; CHECK-NEXT: store i5 [[M1]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[MINMAXOP:%.*]] = select i1 [[COND:%.*]], i5 [[X]], i5 [[Y:%.*]]
; CHECK-NEXT: [[SEL:%.*]] = call i5 @llvm.umin.i5(i5 [[MINMAXOP]], i5 [[Z]])
; CHECK-NEXT: ret i5 [[SEL]]
;
%m1 = call i5 @llvm.umin.i5(i5 %x, i5 %z)
- store i5 %m1, i5* %p
+ store i5 %m1, ptr %p
%m2 = call i5 @llvm.umin.i5(i5 %z, i5 %y)
%sel = select i1 %cond, i5 %m1, i5 %m2
ret i5 %sel
}
-define <3 x i5> @umax_umax_common_op_11(i1 %cond, <3 x i5> %x, <3 x i5> %y, <3 x i5> %z, <3 x i5>* %p) {
+define <3 x i5> @umax_umax_common_op_11(i1 %cond, <3 x i5> %x, <3 x i5> %y, <3 x i5> %z, ptr %p) {
; CHECK-LABEL: @umax_umax_common_op_11(
; CHECK-NEXT: [[M2:%.*]] = call <3 x i5> @llvm.umax.v3i5(<3 x i5> [[Y:%.*]], <3 x i5> [[Z:%.*]])
-; CHECK-NEXT: store <3 x i5> [[M2]], <3 x i5>* [[P:%.*]], align 2
+; CHECK-NEXT: store <3 x i5> [[M2]], ptr [[P:%.*]], align 2
; CHECK-NEXT: [[MINMAXOP:%.*]] = select i1 [[COND:%.*]], <3 x i5> [[X:%.*]], <3 x i5> [[Y]]
; CHECK-NEXT: [[SEL:%.*]] = call <3 x i5> @llvm.umax.v3i5(<3 x i5> [[MINMAXOP]], <3 x i5> [[Z]])
; CHECK-NEXT: ret <3 x i5> [[SEL]]
;
%m1 = call <3 x i5> @llvm.umax.v3i5(<3 x i5> %x, <3 x i5> %z)
%m2 = call <3 x i5> @llvm.umax.v3i5(<3 x i5> %y, <3 x i5> %z)
- store <3 x i5> %m2, <3 x i5>* %p
+ store <3 x i5> %m2, ptr %p
%sel = select i1 %cond, <3 x i5> %m1, <3 x i5> %m2
ret <3 x i5> %sel
}
; negative test - too many uses
-define i5 @umin_umin_common_op_10_uses(i1 %cond, i5 %x, i5 %y, i5 %z, i5* %p1, i5* %p2) {
+define i5 @umin_umin_common_op_10_uses(i1 %cond, i5 %x, i5 %y, i5 %z, ptr %p1, ptr %p2) {
; CHECK-LABEL: @umin_umin_common_op_10_uses(
; CHECK-NEXT: [[M1:%.*]] = call i5 @llvm.umin.i5(i5 [[X:%.*]], i5 [[Z:%.*]])
-; CHECK-NEXT: store i5 [[M1]], i5* [[P1:%.*]], align 1
+; CHECK-NEXT: store i5 [[M1]], ptr [[P1:%.*]], align 1
; CHECK-NEXT: [[M2:%.*]] = call i5 @llvm.umin.i5(i5 [[Z]], i5 [[Y:%.*]])
-; CHECK-NEXT: store i5 [[M2]], i5* [[P2:%.*]], align 1
+; CHECK-NEXT: store i5 [[M2]], ptr [[P2:%.*]], align 1
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[COND:%.*]], i5 [[M1]], i5 [[M2]]
; CHECK-NEXT: ret i5 [[SEL]]
;
%m1 = call i5 @llvm.umin.i5(i5 %x, i5 %z)
- store i5 %m1, i5* %p1
+ store i5 %m1, ptr %p1
%m2 = call i5 @llvm.umin.i5(i5 %z, i5 %y)
- store i5 %m2, i5* %p2
+ store i5 %m2, ptr %p2
%sel = select i1 %cond, i5 %m1, i5 %m2
ret i5 %sel
}
define <2 x i1> @xor_and3(<2 x i1> %c, <2 x i32> %X, <2 x i32> %Y) {
; CHECK-LABEL: @xor_and3(
; CHECK-NEXT: [[COMP:%.*]] = icmp uge <2 x i32> [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[SEL:%.*]] = select <2 x i1> [[C:%.*]], <2 x i1> [[COMP]], <2 x i1> <i1 icmp ne (i8* inttoptr (i64 1234 to i8*), i8* @glb), i1 true>
+; CHECK-NEXT: [[SEL:%.*]] = select <2 x i1> [[C:%.*]], <2 x i1> [[COMP]], <2 x i1> <i1 icmp ne (ptr inttoptr (i64 1234 to ptr), ptr @glb), i1 true>
; CHECK-NEXT: ret <2 x i1> [[SEL]]
;
%comp = icmp ult <2 x i32> %X, %Y
- %sel = select <2 x i1> %c, <2 x i1> %comp, <2 x i1> <i1 icmp eq (i8* @glb, i8* inttoptr (i64 1234 to i8*)), i1 false>
+ %sel = select <2 x i1> %c, <2 x i1> %comp, <2 x i1> <i1 icmp eq (ptr @glb, ptr inttoptr (i64 1234 to ptr)), i1 false>
%res = xor <2 x i1> %sel, <i1 true, i1 true>
ret <2 x i1> %res
}
define <2 x i1> @xor_or3(<2 x i1> %c, <2 x i32> %X, <2 x i32> %Y) {
; CHECK-LABEL: @xor_or3(
; CHECK-NEXT: [[COMP:%.*]] = icmp uge <2 x i32> [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[SEL:%.*]] = select <2 x i1> [[C:%.*]], <2 x i1> <i1 icmp ne (i8* inttoptr (i64 1234 to i8*), i8* @glb), i1 true>, <2 x i1> [[COMP]]
+; CHECK-NEXT: [[SEL:%.*]] = select <2 x i1> [[C:%.*]], <2 x i1> <i1 icmp ne (ptr inttoptr (i64 1234 to ptr), ptr @glb), i1 true>, <2 x i1> [[COMP]]
; CHECK-NEXT: ret <2 x i1> [[SEL]]
;
%comp = icmp ult <2 x i32> %X, %Y
- %sel = select <2 x i1> %c, <2 x i1> <i1 icmp eq (i8* @glb, i8* inttoptr (i64 1234 to i8*)), i1 false>, <2 x i1> %comp
+ %sel = select <2 x i1> %c, <2 x i1> <i1 icmp eq (ptr @glb, ptr inttoptr (i64 1234 to ptr)), i1 false>, <2 x i1> %comp
%res = xor <2 x i1> %sel, <i1 true, i1 true>
ret <2 x i1> %res
}
ret i8 %trunc
}
-define void @min_max_bitcast(<4 x float> %a, <4 x float> %b, <4 x i32>* %ptr1, <4 x i32>* %ptr2) {
+define void @min_max_bitcast(<4 x float> %a, <4 x float> %b, ptr %ptr1, ptr %ptr2) {
; CHECK-LABEL: @min_max_bitcast(
; CHECK-NEXT: [[CMP:%.*]] = fcmp olt <4 x float> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[SEL1_V:%.*]] = select <4 x i1> [[CMP]], <4 x float> [[A]], <4 x float> [[B]], !prof [[PROF0]]
; CHECK-NEXT: [[SEL2_V:%.*]] = select <4 x i1> [[CMP]], <4 x float> [[B]], <4 x float> [[A]], !prof [[PROF0]]
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32>* [[PTR1:%.*]] to <4 x float>*
-; CHECK-NEXT: store <4 x float> [[SEL1_V]], <4 x float>* [[TMP1]], align 16
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i32>* [[PTR2:%.*]] to <4 x float>*
-; CHECK-NEXT: store <4 x float> [[SEL2_V]], <4 x float>* [[TMP2]], align 16
+; CHECK-NEXT: store <4 x float> [[SEL1_V]], ptr [[PTR1:%.*]], align 16
+; CHECK-NEXT: store <4 x float> [[SEL2_V]], ptr [[PTR2:%.*]], align 16
; CHECK-NEXT: ret void
;
%cmp = fcmp olt <4 x float> %a, %b
%bc1 = bitcast <4 x float> %a to <4 x i32>
%bc2 = bitcast <4 x float> %b to <4 x i32>
%sel1 = select <4 x i1> %cmp, <4 x i32> %bc1, <4 x i32> %bc2, !prof !1
%sel2 = select <4 x i1> %cmp, <4 x i32> %bc2, <4 x i32> %bc1, !prof !1
- store <4 x i32> %sel1, <4 x i32>* %ptr1
- store <4 x i32> %sel2, <4 x i32>* %ptr2
+ store <4 x i32> %sel1, ptr %ptr1
+ store <4 x i32> %sel2, ptr %ptr2
ret void
}
;
%B = icmp eq i32 %A, %A
; Never true
- %C = icmp eq i32* @X, null
+ %C = icmp eq ptr @X, null
%D = and i1 %B, %C
ret i1 %D
}
;
%B = icmp eq i32 %A, %A
; Never true
- %C = icmp eq i32* @X, null
+ %C = icmp eq ptr @X, null
%D = select i1 %B, i1 %C, i1 false
ret i1 %D
}
;
%B = icmp ne i32 %A, %A
; Never false
- %C = icmp ne i32* @X, null
+ %C = icmp ne ptr @X, null
%D = or i1 %B, %C
ret i1 %D
}
;
%B = icmp ne i32 %A, %A
; Never false
- %C = icmp ne i32* @X, null
+ %C = icmp ne ptr @X, null
%D = select i1 %B, i1 true, i1 %C
ret i1 %D
}
ret i64 %s
}
-define i32 @test8(i8 %a, i32 %f, i1 %p, i32* %z) {
+define i32 @test8(i8 %a, i32 %f, i1 %p, ptr %z) {
; CHECK-LABEL: @test8(
; CHECK-NEXT: [[D:%.*]] = lshr i32 [[F:%.*]], 24
; CHECK-NEXT: [[N:%.*]] = select i1 [[P:%.*]], i32 [[D]], i32 0
ret <2 x i32> %D
}
-define void @test11(<2 x i16> %srcA, <2 x i16> %srcB, <2 x i16>* %dst) {
+define void @test11(<2 x i16> %srcA, <2 x i16> %srcB, ptr %dst) {
; CHECK-LABEL: @test11(
; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i16> [[SRCB:%.*]], [[SRCA:%.*]]
; CHECK-NEXT: [[SEXT:%.*]] = sext <2 x i1> [[CMP]] to <2 x i16>
-; CHECK-NEXT: store <2 x i16> [[SEXT]], <2 x i16>* [[DST:%.*]], align 4
+; CHECK-NEXT: store <2 x i16> [[SEXT]], ptr [[DST:%.*]], align 4
; CHECK-NEXT: ret void
;
%cmp = icmp eq <2 x i16> %srcB, %srcA
%sext = sext <2 x i1> %cmp to <2 x i16>
%tmask = ashr <2 x i16> %sext, <i16 15, i16 15>
- store <2 x i16> %tmask, <2 x i16>* %dst
+ store <2 x i16> %tmask, ptr %dst
ret void
}
; extra use is ok and in this case the result can be simplified to a constant
-define i32 @ashr_add_nuw(i32 %x, i32* %p) {
+define i32 @ashr_add_nuw(i32 %x, ptr %p) {
; CHECK-LABEL: @ashr_add_nuw(
; CHECK-NEXT: [[A:%.*]] = add nuw i32 [[X:%.*]], 5
-; CHECK-NEXT: store i32 [[A]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[A]], ptr [[P:%.*]], align 4
; CHECK-NEXT: ret i32 -1
;
%a = add nuw i32 %x, 5
- store i32 %a, i32* %p
+ store i32 %a, ptr %p
%r = ashr i32 -6, %a
ret i32 %r
}
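; Why the constant: nuw on the add guarantees %a is at least 5, and ashr of
; -6 (...11111010) by any amount >= 3 already yields all-ones, so the ashr
; folds to -1 regardless of the extra use.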
; vectors and extra uses are allowed
; nuw propagates to the new shift
-define <2 x i8> @shl_nuw_add_negative_splat_uses(<2 x i8> %x, <2 x i8>* %p) {
+define <2 x i8> @shl_nuw_add_negative_splat_uses(<2 x i8> %x, ptr %p) {
; CHECK-LABEL: @shl_nuw_add_negative_splat_uses(
; CHECK-NEXT: [[A:%.*]] = add <2 x i8> [[X:%.*]], <i8 -2, i8 -2>
-; CHECK-NEXT: store <2 x i8> [[A]], <2 x i8>* [[P:%.*]], align 2
+; CHECK-NEXT: store <2 x i8> [[A]], ptr [[P:%.*]], align 2
; CHECK-NEXT: [[R:%.*]] = shl nuw <2 x i8> <i8 3, i8 3>, [[X]]
; CHECK-NEXT: ret <2 x i8> [[R]]
;
%a = add <2 x i8> %x, <i8 -2, i8 -2>
- store <2 x i8> %a, <2 x i8>* %p
+ store <2 x i8> %a, ptr %p
%r = shl nuw <2 x i8> <i8 12, i8 12>, %a
ret <2 x i8> %r
}
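; The fold uses 12 = 3 << 2: shifting 12 left by (%x - 2) equals shifting 3
; left by %x, so the add disappears from the shift and nuw carries over.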
define i1 @constantexpr() {
; CHECK-LABEL: @constantexpr(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @f.a, align 2
+; CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr @f.a, align 2
; CHECK-NEXT: [[TMP1:%.*]] = lshr i16 [[TMP0]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = and i16 [[TMP1]], shl (i16 1, i16 zext (i1 icmp ne (i16 ptrtoint (i16* @f.a to i16), i16 1) to i16))
+; CHECK-NEXT: [[TMP2:%.*]] = and i16 [[TMP1]], shl (i16 1, i16 zext (i1 icmp ne (i16 ptrtoint (ptr @f.a to i16), i16 1) to i16))
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i16 [[TMP2]], 0
; CHECK-NEXT: ret i1 [[TOBOOL]]
;
entry:
- %0 = load i16, i16* @f.a
+ %0 = load i16, ptr @f.a
%shr = ashr i16 %0, 1
- %shr1 = ashr i16 %shr, zext (i1 icmp ne (i16 ptrtoint (i16* @f.a to i16), i16 1) to i16)
+ %shr1 = ashr i16 %shr, zext (i1 icmp ne (i16 ptrtoint (ptr @f.a to i16), i16 1) to i16)
%and = and i16 %shr1, 1
%tobool = icmp ne i16 %and, 0
ret i1 %tobool
@Y16 = global i16 42
define i16 @t01(i32 %x) {
; CHECK-LABEL: @t01(
-; CHECK-NEXT: [[T0:%.*]] = shl i32 [[X:%.*]], ptrtoint (i32* @Y32 to i32)
+; CHECK-NEXT: [[T0:%.*]] = shl i32 [[X:%.*]], ptrtoint (ptr @Y32 to i32)
; CHECK-NEXT: [[T1:%.*]] = trunc i32 [[T0]] to i16
-; CHECK-NEXT: [[T2:%.*]] = shl i16 [[T1]], ptrtoint (i16* @Y16 to i16)
+; CHECK-NEXT: [[T2:%.*]] = shl i16 [[T1]], ptrtoint (ptr @Y16 to i16)
; CHECK-NEXT: ret i16 [[T2]]
;
- %t0 = shl i32 %x, ptrtoint (i32* @Y32 to i32)
+ %t0 = shl i32 %x, ptrtoint (ptr @Y32 to i32)
%t1 = trunc i32 %t0 to i16
- %t2 = shl i16 %t1, ptrtoint (i16* @Y16 to i16)
+ %t2 = shl i16 %t1, ptrtoint (ptr @Y16 to i16)
ret i16 %t2
}
; CHECK-NEXT: ret i64 0
;
%A = alloca i64
- %L = load i64, i64* %A
- %V = add i64 ptrtoint (i32* @X to i64), 0
+ %L = load i64, ptr %A
+ %V = add i64 ptrtoint (ptr @X to i64), 0
%B2 = shl i64 %V, 0
%B4 = ashr i64 %B2, %L
%B = and i64 undef, %B4
ret <2 x i32> %sh1
}
-define i32 @lshr_or_extra_use(i32 %x, i32 %y, i32* %p) {
+define i32 @lshr_or_extra_use(i32 %x, i32 %y, ptr %p) {
; CHECK-LABEL: @lshr_or_extra_use(
; CHECK-NEXT: [[SH0:%.*]] = lshr i32 [[X:%.*]], 5
; CHECK-NEXT: [[R:%.*]] = or i32 [[SH0]], [[Y:%.*]]
-; CHECK-NEXT: store i32 [[R]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[R]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[SH1:%.*]] = lshr i32 [[R]], 7
; CHECK-NEXT: ret i32 [[SH1]]
;
%sh0 = lshr i32 %x, 5
%r = or i32 %sh0, %y
- store i32 %r, i32* %p
+ store i32 %r, ptr %p
%sh1 = lshr i32 %r, 7
ret i32 %sh1
}
define i32 @PR44028(i32 %x) {
; CHECK-LABEL: @PR44028(
; CHECK-NEXT: [[SH1:%.*]] = ashr exact i32 [[X:%.*]], 16
-; CHECK-NEXT: [[T0:%.*]] = xor i32 [[SH1]], shl (i32 ptrtoint (i32* @g to i32), i32 16)
+; CHECK-NEXT: [[T0:%.*]] = xor i32 [[SH1]], shl (i32 ptrtoint (ptr @g to i32), i32 16)
; CHECK-NEXT: [[T27:%.*]] = ashr exact i32 [[T0]], 16
; CHECK-NEXT: ret i32 [[T27]]
;
%sh1 = ashr exact i32 %x, 16
- %t0 = xor i32 %sh1, shl (i32 ptrtoint (i32* @g to i32), i32 16)
+ %t0 = xor i32 %sh1, shl (i32 ptrtoint (ptr @g to i32), i32 16)
%t27 = ashr exact i32 %t0, 16
ret i32 %t27
}
; Converting the 2 shifts to SHL 6 without the AND is wrong.
; https://llvm.org/bugs/show_bug.cgi?id=8547
-define i32 @pr8547(i32* %g) {
+define i32 @pr8547(ptr %g) {
; CHECK-LABEL: @pr8547(
; CHECK-NEXT: codeRepl:
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK: for.cond:
; CHECK-NEXT: [[STOREMERGE:%.*]] = phi i32 [ 0, [[CODEREPL:%.*]] ], [ 5, [[FOR_COND]] ]
-; CHECK-NEXT: store i32 [[STOREMERGE]], i32* [[G:%.*]], align 4
+; CHECK-NEXT: store i32 [[STOREMERGE]], ptr [[G:%.*]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = shl nuw nsw i32 [[STOREMERGE]], 6
; CHECK-NEXT: [[CONV2:%.*]] = and i32 [[TMP0]], 64
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[CONV2]], 0
for.cond:
%storemerge = phi i32 [ 0, %codeRepl ], [ 5, %for.cond ]
- store i32 %storemerge, i32* %g, align 4
+ store i32 %storemerge, ptr %g, align 4
%shl = shl i32 %storemerge, 30
%conv2 = lshr i32 %shl, 24
%tobool = icmp eq i32 %conv2, 0
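; The shift pair computes (%storemerge << 30) >> 24, i.e. (%storemerge & 3) << 6,
; so the single-shift form must pair shl 6 with an and that drops the bits
; shifted out at the top; emitting shl 6 alone would keep them.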
; OSS Fuzz #4871
; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=4871
-define i177 @lshr_out_of_range(i177 %Y, i177** %A2, i177*** %ptr) {
+define i177 @lshr_out_of_range(i177 %Y, ptr %A2, ptr %ptr) {
; CHECK-LABEL: @lshr_out_of_range(
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i177 [[Y:%.*]], -1
; CHECK-NEXT: [[B4:%.*]] = sext i1 [[TMP1]] to i177
; CHECK-NEXT: [[C8:%.*]] = icmp ult i177 [[B4]], [[Y]]
; CHECK-NEXT: [[TMP2:%.*]] = sext i1 [[C8]] to i64
-; CHECK-NEXT: [[G18:%.*]] = getelementptr i177*, i177** [[A2:%.*]], i64 [[TMP2]]
-; CHECK-NEXT: store i177** [[G18]], i177*** [[PTR:%.*]], align 8
+; CHECK-NEXT: [[G18:%.*]] = getelementptr ptr, ptr [[A2:%.*]], i64 [[TMP2]]
+; CHECK-NEXT: store ptr [[G18]], ptr [[PTR:%.*]], align 8
; CHECK-NEXT: ret i177 0
;
%B5 = udiv i177 %Y, -1
%B10 = sub i177 %B5, %B3
%B12 = lshr i177 %Y, %B6
%C8 = icmp ugt i177 %B12, %B4
- %G18 = getelementptr i177*, i177** %A2, i1 %C8
- store i177** %G18, i177*** %ptr
+ %G18 = getelementptr ptr, ptr %A2, i1 %C8
+ store ptr %G18, ptr %ptr
%B1 = udiv i177 %B10, %B6
ret i177 %B1
}
; OSS Fuzz #26716
; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=26716
-define i177 @lshr_out_of_range2(i177 %Y, i177** %A2, i177*** %ptr) {
+define i177 @lshr_out_of_range2(i177 %Y, ptr %A2, ptr %ptr) {
; CHECK-LABEL: @lshr_out_of_range2(
; CHECK-NEXT: ret i177 0
;
%B6 = mul i177 %B5, %B2
%B12 = lshr i177 %Y, %B6
%C8 = icmp ugt i177 %B12, %B4
- %G18 = getelementptr i177*, i177** %A2, i1 %C8
- store i177** %G18, i177*** %ptr, align 8
+ %G18 = getelementptr ptr, ptr %A2, i1 %C8
+ store ptr %G18, ptr %ptr, align 8
%B1 = udiv i177 %B5, %B6
ret i177 %B1
}
; OSS Fuzz #5032
; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=5032
-define void @ashr_out_of_range(i177* %A) {
+define void @ashr_out_of_range(ptr %A) {
; CHECK-LABEL: @ashr_out_of_range(
-; CHECK-NEXT: [[L:%.*]] = load i177, i177* [[A:%.*]], align 4
+; CHECK-NEXT: [[L:%.*]] = load i177, ptr [[A:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i177 [[L]], -1
; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i64 -1, i64 -2
-; CHECK-NEXT: [[G11:%.*]] = getelementptr i177, i177* [[A]], i64 [[TMP2]]
-; CHECK-NEXT: [[L7:%.*]] = load i177, i177* [[G11]], align 4
+; CHECK-NEXT: [[G11:%.*]] = getelementptr i177, ptr [[A]], i64 [[TMP2]]
+; CHECK-NEXT: [[L7:%.*]] = load i177, ptr [[G11]], align 4
; CHECK-NEXT: [[C171:%.*]] = icmp slt i177 [[L7]], 0
; CHECK-NEXT: [[C17:%.*]] = select i1 [[TMP1]], i1 [[C171]], i1 false
; CHECK-NEXT: [[TMP3:%.*]] = sext i1 [[C17]] to i64
-; CHECK-NEXT: [[G62:%.*]] = getelementptr i177, i177* [[G11]], i64 [[TMP3]]
+; CHECK-NEXT: [[G62:%.*]] = getelementptr i177, ptr [[G11]], i64 [[TMP3]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i177 [[L7]], -1
; CHECK-NEXT: [[B28:%.*]] = select i1 [[TMP4]], i177 0, i177 [[L7]]
-; CHECK-NEXT: store i177 [[B28]], i177* [[G62]], align 4
+; CHECK-NEXT: store i177 [[B28]], ptr [[G62]], align 4
; CHECK-NEXT: ret void
;
- %L = load i177, i177* %A
+ %L = load i177, ptr %A
%B5 = udiv i177 %L, -1
%B4 = add i177 %B5, -1
%B2 = add i177 %B4, -1
- %G11 = getelementptr i177, i177* %A, i177 %B2
- %L7 = load i177, i177* %G11
+ %G11 = getelementptr i177, ptr %A, i177 %B2
+ %L7 = load i177, ptr %G11
%B6 = mul i177 %B5, %B2
%B24 = ashr i177 %L7, %B6
%B36 = and i177 %L7, %B4
%C17 = icmp sgt i177 %B36, %B24
- %G62 = getelementptr i177, i177* %G11, i1 %C17
+ %G62 = getelementptr i177, ptr %G11, i1 %C17
%B28 = urem i177 %B24, %B6
- store i177 %B28, i177* %G62
+ store i177 %B28, ptr %G62
ret void
}
; OSS Fuzz #26135
; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=26135
-define void @ashr_out_of_range_1(i177* %A) {
+define void @ashr_out_of_range_1(ptr %A) {
; CHECK-LABEL: @ashr_out_of_range_1(
-; CHECK-NEXT: [[L:%.*]] = load i177, i177* [[A:%.*]], align 4
-; CHECK-NEXT: [[G11:%.*]] = getelementptr i177, i177* [[A]], i64 -1
+; CHECK-NEXT: [[L:%.*]] = load i177, ptr [[A:%.*]], align 4
+; CHECK-NEXT: [[G11:%.*]] = getelementptr i177, ptr [[A]], i64 -1
; CHECK-NEXT: [[B24_LOBIT:%.*]] = ashr i177 [[L]], 175
; CHECK-NEXT: [[TMP1:%.*]] = trunc i177 [[B24_LOBIT]] to i64
-; CHECK-NEXT: [[G62:%.*]] = getelementptr i177, i177* [[G11]], i64 [[TMP1]]
-; CHECK-NEXT: store i177 0, i177* [[G62]], align 4
+; CHECK-NEXT: [[G62:%.*]] = getelementptr i177, ptr [[G11]], i64 [[TMP1]]
+; CHECK-NEXT: store i177 0, ptr [[G62]], align 4
; CHECK-NEXT: ret void
;
- %L = load i177, i177* %A, align 4
+ %L = load i177, ptr %A, align 4
%B5 = udiv i177 %L, -1
%B4 = add i177 %B5, -1
%B = and i177 %B4, %L
%B2 = add i177 %B, -1
- %G11 = getelementptr i177, i177* %A, i177 %B2
+ %G11 = getelementptr i177, ptr %A, i177 %B2
%B6 = mul i177 %B5, %B2
%B24 = ashr i177 %L, %B6
%C17 = icmp sgt i177 %B, %B24
- %G62 = getelementptr i177, i177* %G11, i1 %C17
+ %G62 = getelementptr i177, ptr %G11, i1 %C17
%B28 = urem i177 %B24, %B6
- store i177 %B28, i177* %G62, align 4
+ store i177 %B28, ptr %G62, align 4
ret void
}
; OSS Fuzz #38078
; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=38078
-define void @ossfuzz_38078(i32 %arg, i32 %arg1, i32* %ptr, i1* %ptr2, i32* %ptr3, i1* %ptr4, i32* %ptr5, i32* %ptr6, i1* %ptr7) {
+define void @ossfuzz_38078(i32 %arg, i32 %arg1, ptr %ptr, ptr %ptr2, ptr %ptr3, ptr %ptr4, ptr %ptr5, ptr %ptr6, ptr %ptr7) {
; CHECK-LABEL: @ossfuzz_38078(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I2:%.*]] = add nsw i32 [[ARG:%.*]], [[ARG1:%.*]]
; CHECK-NEXT: [[B3:%.*]] = or i32 [[I2]], 2147483647
-; CHECK-NEXT: [[G1:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i64 -1
+; CHECK-NEXT: [[G1:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 -1
; CHECK-NEXT: [[I5:%.*]] = icmp eq i32 [[I2]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[I5]])
-; CHECK-NEXT: store volatile i32 [[B3]], i32* [[G1]], align 4
+; CHECK-NEXT: store volatile i32 [[B3]], ptr [[G1]], align 4
; CHECK-NEXT: br label [[BB:%.*]]
; CHECK: BB:
; CHECK-NEXT: unreachable
%C2 = icmp sge i1 %C1, false
%C7 = icmp sle i32 %i3, %B16
%B20 = xor i32 %B21, %B22
- %G1 = getelementptr i32, i32* %ptr, i32 %B22
+ %G1 = getelementptr i32, ptr %ptr, i32 %B22
%B1 = sub i32 %B, 0
%B26 = ashr i32 %B29, 0
%B4 = add i32 0, %B5
%i5 = icmp eq i32 %B20, %B18
%C11 = icmp ugt i32 %i4, %B4
call void @llvm.assume(i1 %i5)
- store volatile i32 %B4, i32* %G1, align 4
+ store volatile i32 %B4, ptr %G1, align 4
%B11 = or i32 0, %B23
br label %BB
BB:
- store i1 %C7, i1* %ptr2, align 1
- store i32 %B11, i32* %ptr3, align 4
- store i1 %C11, i1* %ptr4, align 1
- store i32 %B1, i32* %ptr5, align 4
- store i32 %B27, i32* %ptr6, align 4
+ store i1 %C7, ptr %ptr2, align 1
+ store i32 %B11, ptr %ptr3, align 4
+ store i1 %C11, ptr %ptr4, align 1
+ store i32 %B1, ptr %ptr5, align 4
+ store i32 %B27, ptr %ptr6, align 4
%C = icmp ne i32 %B26, 0
%B17 = or i1 %C, %C2
- store i1 %B17, i1* %ptr7, align 1
+ store i1 %B17, ptr %ptr7, align 1
unreachable
}
declare void @llvm.assume(i1 noundef)
; Extra use
; Fold happened
-define i1 @scalar_shl_and_negC_eq_extra_use_shl(i32 %x, i32 %y, i32 %z, i32* %p) {
+define i1 @scalar_shl_and_negC_eq_extra_use_shl(i32 %x, i32 %y, i32 %z, ptr %p) {
; CHECK-LABEL: @scalar_shl_and_negC_eq_extra_use_shl(
; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[SHL]], [[Z:%.*]]
-; CHECK-NEXT: store i32 [[XOR]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[XOR]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = icmp ult i32 [[SHL]], 8
; CHECK-NEXT: ret i1 [[R]]
;
%shl = shl i32 %x, %y
%xor = xor i32 %shl, %z ; extra use of shl
- store i32 %xor, i32* %p
+ store i32 %xor, ptr %p
%and = and i32 %shl, 4294967288 ; ~7
%r = icmp eq i32 %and, 0
ret i1 %r
}
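; The fold holds because (%shl & -8) == 0 iff %shl u< 8, so the and becomes a
; range check and the extra use of %shl is irrelevant.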
; Not fold
-define i1 @scalar_shl_and_negC_eq_extra_use_and(i32 %x, i32 %y, i32 %z, i32* %p) {
+define i1 @scalar_shl_and_negC_eq_extra_use_and(i32 %x, i32 %y, i32 %z, ptr %p) {
; CHECK-LABEL: @scalar_shl_and_negC_eq_extra_use_and(
; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHL]], -8
; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[AND]], [[Z:%.*]]
-; CHECK-NEXT: store i32 [[MUL]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[MUL]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%shl = shl i32 %x, %y
%and = and i32 %shl, 4294967288 ; ~7
%mul = mul i32 %and, %z ; extra use of and
- store i32 %mul, i32* %p
+ store i32 %mul, ptr %p
%r = icmp eq i32 %and, 0
ret i1 %r
}
; Not fold
-define i1 @scalar_shl_and_negC_eq_extra_use_shl_and(i32 %x, i32 %y, i32 %z, i32* %p, i32* %q) {
+define i1 @scalar_shl_and_negC_eq_extra_use_shl_and(i32 %x, i32 %y, i32 %z, ptr %p, ptr %q) {
; CHECK-LABEL: @scalar_shl_and_negC_eq_extra_use_shl_and(
; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHL]], -8
-; CHECK-NEXT: store i32 [[AND]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[AND]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[SHL]], [[Z:%.*]]
-; CHECK-NEXT: store i32 [[ADD]], i32* [[Q:%.*]], align 4
+; CHECK-NEXT: store i32 [[ADD]], ptr [[Q:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%shl = shl i32 %x, %y
%and = and i32 %shl, 4294967288 ; ~7
- store i32 %and, i32* %p ; extra use of and
+ store i32 %and, ptr %p ; extra use of and
%add = add i32 %shl, %z ; extra use of shl
- store i32 %add, i32* %q
+ store i32 %add, ptr %q
%r = icmp eq i32 %and, 0
ret i1 %r
}
; Extra use
; Fold happened
-define i1 @scalar_shl_and_signbit_eq_extra_use_shl(i32 %x, i32 %y, i32 %z, i32* %p) {
+define i1 @scalar_shl_and_signbit_eq_extra_use_shl(i32 %x, i32 %y, i32 %z, ptr %p) {
; CHECK-LABEL: @scalar_shl_and_signbit_eq_extra_use_shl(
; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[SHL]], [[Z:%.*]]
-; CHECK-NEXT: store i32 [[XOR]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[XOR]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = icmp sgt i32 [[SHL]], -1
; CHECK-NEXT: ret i1 [[R]]
;
%shl = shl i32 %x, %y
%xor = xor i32 %shl, %z ; extra use of shl
- store i32 %xor, i32* %p
+ store i32 %xor, ptr %p
%and = and i32 %shl, 2147483648
%r = icmp eq i32 %and, 0
ret i1 %r
}
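; Likewise (%shl & 0x80000000) == 0 iff the sign bit is clear, i.e.
; %shl s> -1, so the and disappears despite the extra use of %shl.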
; Not fold
-define i1 @scalar_shl_and_signbit_eq_extra_use_and(i32 %x, i32 %y, i32 %z, i32* %p) {
+define i1 @scalar_shl_and_signbit_eq_extra_use_and(i32 %x, i32 %y, i32 %z, ptr %p) {
; CHECK-LABEL: @scalar_shl_and_signbit_eq_extra_use_and(
; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHL]], -2147483648
; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[AND]], [[Z:%.*]]
-; CHECK-NEXT: store i32 [[MUL]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[MUL]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%shl = shl i32 %x, %y
%and = and i32 %shl, 2147483648
%mul = mul i32 %and, %z ; extra use of and
- store i32 %mul, i32* %p
+ store i32 %mul, ptr %p
%r = icmp eq i32 %and, 0
ret i1 %r
}
; Not fold
-define i1 @scalar_shl_and_signbit_eq_extra_use_shl_and(i32 %x, i32 %y, i32 %z, i32* %p, i32* %q) {
+define i1 @scalar_shl_and_signbit_eq_extra_use_shl_and(i32 %x, i32 %y, i32 %z, ptr %p, ptr %q) {
; CHECK-LABEL: @scalar_shl_and_signbit_eq_extra_use_shl_and(
; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHL]], -2147483648
-; CHECK-NEXT: store i32 [[AND]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[AND]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[SHL]], [[Z:%.*]]
-; CHECK-NEXT: store i32 [[ADD]], i32* [[Q:%.*]], align 4
+; CHECK-NEXT: store i32 [[ADD]], ptr [[Q:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%shl = shl i32 %x, %y
%and = and i32 %shl, 2147483648
- store i32 %and, i32* %p ; extra use of and
+ store i32 %and, ptr %p ; extra use of and
%add = add i32 %shl, %z ; extra use of shl
- store i32 %add, i32* %q
+ store i32 %add, ptr %q
%r = icmp eq i32 %and, 0
ret i1 %r
}
; Extra use
; Not simplified
-define i1 @scalar_i8_shl_ult_const_extra_use_shl(i8 %x, i8* %p) {
+define i1 @scalar_i8_shl_ult_const_extra_use_shl(i8 %x, ptr %p) {
; CHECK-LABEL: @scalar_i8_shl_ult_const_extra_use_shl(
; CHECK-NEXT: [[SHL:%.*]] = shl i8 [[X:%.*]], 5
-; CHECK-NEXT: store i8 [[SHL]], i8* [[P:%.*]], align 1
+; CHECK-NEXT: store i8 [[SHL]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[SHL]], 64
; CHECK-NEXT: ret i1 [[CMP]]
;
%shl = shl i8 %x, 5
- store i8 %shl, i8* %p
+ store i8 %shl, ptr %p
%cmp = icmp ult i8 %shl, 64
ret i1 %cmp
}
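; Without the extra use, the compare could in principle become a mask test on
; %x, since (%x << 5) u< 64 iff (%x & 6) == 0; with %shl kept alive by the
; store, that rewrite saves nothing, so it is skipped.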
declare void @use(<4 x i16>)
-define void @test(<16 x i8> %w, i32* %o1, float* %o2) {
+define void @test(<16 x i8> %w, ptr %o1, ptr %o2) {
; CHECK-LABEL: @test(
; CHECK-NEXT: [[V_BC:%.*]] = bitcast <16 x i8> [[W:%.*]] to <4 x i32>
; CHECK-NEXT: [[V_EXTRACT:%.*]] = extractelement <4 x i32> [[V_BC]], i64 3
; CHECK-NEXT: [[V_BC1:%.*]] = bitcast <16 x i8> [[W]] to <4 x float>
; CHECK-NEXT: [[V_EXTRACT2:%.*]] = extractelement <4 x float> [[V_BC1]], i64 3
-; CHECK-NEXT: store i32 [[V_EXTRACT]], i32* [[O1:%.*]], align 4
-; CHECK-NEXT: store float [[V_EXTRACT2]], float* [[O2:%.*]], align 4
+; CHECK-NEXT: store i32 [[V_EXTRACT]], ptr [[O1:%.*]], align 4
+; CHECK-NEXT: store float [[V_EXTRACT2]], ptr [[O2:%.*]], align 4
; CHECK-NEXT: ret void
;
%v = shufflevector <16 x i8> %w, <16 x i8> poison, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
%f = bitcast <4 x i8> %v to float
%i = bitcast <4 x i8> %v to i32
- store i32 %i, i32* %o1, align 4
- store float %f, float* %o2, align 4
+ store i32 %i, ptr %o1, align 4
+ store float %f, ptr %o2, align 4
ret void
}
declare void @use(<4 x i16>)
-define void @test(<16 x i8> %w, i32* %o1, float* %o2) {
+define void @test(<16 x i8> %w, ptr %o1, ptr %o2) {
; CHECK-LABEL: @test(
; CHECK-NEXT: [[V_BC:%.*]] = bitcast <16 x i8> [[W:%.*]] to <4 x i32>
; CHECK-NEXT: [[V_EXTRACT:%.*]] = extractelement <4 x i32> [[V_BC]], i64 3
; CHECK-NEXT: [[V_BC1:%.*]] = bitcast <16 x i8> [[W]] to <4 x float>
; CHECK-NEXT: [[V_EXTRACT2:%.*]] = extractelement <4 x float> [[V_BC1]], i64 3
-; CHECK-NEXT: store i32 [[V_EXTRACT]], i32* [[O1:%.*]], align 4
-; CHECK-NEXT: store float [[V_EXTRACT2]], float* [[O2:%.*]], align 4
+; CHECK-NEXT: store i32 [[V_EXTRACT]], ptr [[O1:%.*]], align 4
+; CHECK-NEXT: store float [[V_EXTRACT2]], ptr [[O2:%.*]], align 4
; CHECK-NEXT: ret void
;
%v = shufflevector <16 x i8> %w, <16 x i8> undef, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
%f = bitcast <4 x i8> %v to float
%i = bitcast <4 x i8> %v to i32
- store i32 %i, i32* %o1, align 4
- store float %f, float* %o2, align 4
+ store i32 %i, ptr %o1, align 4
+ store float %f, ptr %o2, align 4
ret void
}
ret <2 x i4> %r
}
-define <2 x i4> @shuf_bitcast_inserti_use1(<2 x i8> %v, i8 %x, <2 x i8>* %p) {
+define <2 x i4> @shuf_bitcast_inserti_use1(<2 x i8> %v, i8 %x, ptr %p) {
; CHECK-LABEL: @shuf_bitcast_inserti_use1(
; CHECK-NEXT: [[I:%.*]] = insertelement <2 x i8> [[V:%.*]], i8 [[X:%.*]], i64 0
-; CHECK-NEXT: store <2 x i8> [[I]], <2 x i8>* [[P:%.*]], align 2
+; CHECK-NEXT: store <2 x i8> [[I]], ptr [[P:%.*]], align 2
; CHECK-NEXT: [[R:%.*]] = bitcast i8 [[X]] to <2 x i4>
; CHECK-NEXT: ret <2 x i4> [[R]]
;
%i = insertelement <2 x i8> %v, i8 %x, i32 0
- store <2 x i8> %i, <2 x i8>* %p
+ store <2 x i8> %i, ptr %p
%b = bitcast <2 x i8> %i to <4 x i4>
%r = shufflevector <4 x i4> %b, <4 x i4> undef, <2 x i32> <i32 0, i32 1>
ret <2 x i4> %r
}
-define <2 x i4> @shuf_bitcast_insert_use2(<2 x i8> %v, i8 %x, <4 x i4>* %p) {
+define <2 x i4> @shuf_bitcast_insert_use2(<2 x i8> %v, i8 %x, ptr %p) {
; CHECK-LABEL: @shuf_bitcast_insert_use2(
; CHECK-NEXT: [[I:%.*]] = insertelement <2 x i8> [[V:%.*]], i8 [[X:%.*]], i64 0
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i4>* [[P:%.*]] to <2 x i8>*
-; CHECK-NEXT: store <2 x i8> [[I]], <2 x i8>* [[TMP1]], align 2
+; CHECK-NEXT: store <2 x i8> [[I]], ptr [[P:%.*]], align 2
; CHECK-NEXT: [[R:%.*]] = bitcast i8 [[X]] to <2 x i4>
; CHECK-NEXT: ret <2 x i4> [[R]]
;
%i = insertelement <2 x i8> %v, i8 %x, i32 0
%b = bitcast <2 x i8> %i to <4 x i4>
- store <4 x i4> %b, <4 x i4>* %p
+ store <4 x i4> %b, ptr %p
%r = shufflevector <4 x i4> %b, <4 x i4> undef, <2 x i32> <i32 0, i32 1>
ret <2 x i4> %r
}
; Extra use
; Fold happened
-define i1 @scalar_i32_signbit_lshr_and_eq_extra_use_lshr(i32 %x, i32 %y, i32 %z, i32* %p) {
+define i1 @scalar_i32_signbit_lshr_and_eq_extra_use_lshr(i32 %x, i32 %y, i32 %z, ptr %p) {
; CHECK-LABEL: @scalar_i32_signbit_lshr_and_eq_extra_use_lshr(
; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[LSHR]], [[Z:%.*]]
-; CHECK-NEXT: store i32 [[XOR]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[XOR]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[AND:%.*]] = and i32 [[LSHR]], [[X:%.*]]
; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%lshr = lshr i32 2147483648, %y
%xor = xor i32 %lshr, %z ; extra use of lshr
- store i32 %xor, i32* %p
+ store i32 %xor, ptr %p
%and = and i32 %lshr, %x
%r = icmp eq i32 %and, 0
ret i1 %r
}
; Not fold
-define i1 @scalar_i32_signbit_lshr_and_eq_extra_use_and(i32 %x, i32 %y, i32 %z, i32* %p) {
+define i1 @scalar_i32_signbit_lshr_and_eq_extra_use_and(i32 %x, i32 %y, i32 %z, ptr %p) {
; CHECK-LABEL: @scalar_i32_signbit_lshr_and_eq_extra_use_and(
; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[LSHR]], [[X:%.*]]
; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[AND]], [[Z:%.*]]
-; CHECK-NEXT: store i32 [[MUL]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[MUL]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%lshr = lshr i32 2147483648, %y
%and = and i32 %lshr, %x
%mul = mul i32 %and, %z ; extra use of and
- store i32 %mul, i32* %p
+ store i32 %mul, ptr %p
%r = icmp eq i32 %and, 0
ret i1 %r
}
; Not fold
-define i1 @scalar_i32_signbit_lshr_and_eq_extra_use_lshr_and(i32 %x, i32 %y, i32 %z, i32* %p, i32* %q) {
+define i1 @scalar_i32_signbit_lshr_and_eq_extra_use_lshr_and(i32 %x, i32 %y, i32 %z, ptr %p, ptr %q) {
; CHECK-LABEL: @scalar_i32_signbit_lshr_and_eq_extra_use_lshr_and(
; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[LSHR]], [[X:%.*]]
-; CHECK-NEXT: store i32 [[AND]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[AND]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[LSHR]], [[Z:%.*]]
-; CHECK-NEXT: store i32 [[ADD]], i32* [[Q:%.*]], align 4
+; CHECK-NEXT: store i32 [[ADD]], ptr [[Q:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%lshr = lshr i32 2147483648, %y
%and = and i32 %lshr, %x
- store i32 %and, i32* %p ; extra use of and
+ store i32 %and, ptr %p ; extra use of and
%add = add i32 %lshr, %z ; extra use of lshr
- store i32 %add, i32* %q
+ store i32 %add, ptr %q
%r = icmp eq i32 %and, 0
ret i1 %r
}
; Extra use
; Fold happened
-define i1 @scalar_i32_signbit_shl_and_eq_extra_use_shl(i32 %x, i32 %y, i32 %z, i32* %p) {
+define i1 @scalar_i32_signbit_shl_and_eq_extra_use_shl(i32 %x, i32 %y, i32 %z, ptr %p) {
; CHECK-LABEL: @scalar_i32_signbit_shl_and_eq_extra_use_shl(
; CHECK-NEXT: [[SHL:%.*]] = shl i32 -2147483648, [[Y:%.*]]
; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[SHL]], [[Z:%.*]]
-; CHECK-NEXT: store i32 [[XOR]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[XOR]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHL]], [[X:%.*]]
; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%shl = shl i32 2147483648, %y
%xor = xor i32 %shl, %z ; extra use of shl
- store i32 %xor, i32* %p
+ store i32 %xor, ptr %p
%and = and i32 %shl, %x
%r = icmp eq i32 %and, 0
ret i1 %r
}
; Not fold
-define i1 @scalar_i32_signbit_shl_and_eq_extra_use_and(i32 %x, i32 %y, i32 %z, i32* %p) {
+define i1 @scalar_i32_signbit_shl_and_eq_extra_use_and(i32 %x, i32 %y, i32 %z, ptr %p) {
; CHECK-LABEL: @scalar_i32_signbit_shl_and_eq_extra_use_and(
; CHECK-NEXT: [[SHL:%.*]] = shl i32 -2147483648, [[Y:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHL]], [[X:%.*]]
; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[AND]], [[Z:%.*]]
-; CHECK-NEXT: store i32 [[MUL]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[MUL]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%shl = shl i32 2147483648, %y
%and = and i32 %shl, %x
%mul = mul i32 %and, %z ; extra use of and
- store i32 %mul, i32* %p
+ store i32 %mul, ptr %p
%r = icmp eq i32 %and, 0
ret i1 %r
}
; Not fold
-define i1 @scalar_i32_signbit_shl_and_eq_extra_use_shl_and(i32 %x, i32 %y, i32 %z, i32* %p, i32* %q) {
+define i1 @scalar_i32_signbit_shl_and_eq_extra_use_shl_and(i32 %x, i32 %y, i32 %z, ptr %p, ptr %q) {
; CHECK-LABEL: @scalar_i32_signbit_shl_and_eq_extra_use_shl_and(
; CHECK-NEXT: [[SHL:%.*]] = shl i32 -2147483648, [[Y:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHL]], [[X:%.*]]
-; CHECK-NEXT: store i32 [[AND]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[AND]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[SHL]], [[Z:%.*]]
-; CHECK-NEXT: store i32 [[ADD]], i32* [[Q:%.*]], align 4
+; CHECK-NEXT: store i32 [[ADD]], ptr [[Q:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%shl = shl i32 2147483648, %y
%and = and i32 %shl, %x
- store i32 %and, i32* %p ; extra use of and
+ store i32 %and, ptr %p ; extra use of and
%add = add i32 %shl, %z ; extra use of shl
- store i32 %add, i32* %q
+ store i32 %add, ptr %q
%r = icmp eq i32 %and, 0
ret i1 %r
}
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
; Fold
; ((%x * %y) s/ %x) == %y
; to
; @llvm.smul.with.overflow(%x, %y) + extractvalue + not
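; Illustrative sketch of the fold (not a CHECK'd test): the pattern
;   %mul = mul i32 %x, %y
;   %div = sdiv i32 %mul, %x
;   %r = icmp eq i32 %div, %y
; becomes
;   %wo = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %x, i32 %y)
;   %ov = extractvalue { i32, i1 } %wo, 1
;   %r = xor i1 %ov, true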
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
; Fold
; ((%x * %y) s/ %x) != %y
; to
; @llvm.smul.with.overflow(%x, %y) + extractvalue
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-unknown-linux-gnu"
- %struct.VEC_rtx_base = type { i32, i32, [1 x %struct.rtx_def*] }
+ %struct.VEC_rtx_base = type { i32, i32, [1 x ptr] }
%struct.VEC_rtx_gc = type { %struct.VEC_rtx_base }
- %struct.block_symbol = type { [3 x %struct.rtunion], %struct.object_block*, i64 }
- %struct.object_block = type { %struct.section*, i32, i64, %struct.VEC_rtx_gc*, %struct.VEC_rtx_gc* }
+ %struct.block_symbol = type { [3 x %struct.rtunion], ptr, i64 }
+ %struct.object_block = type { ptr, i32, i64, ptr, ptr }
%struct.omp_clause_subcode = type { i32 }
- %struct.rtunion = type { i8* }
+ %struct.rtunion = type { ptr }
%struct.rtx_def = type { i16, i8, i8, %struct.u }
%struct.section = type { %struct.unnamed_section }
%struct.u = type { %struct.block_symbol }
- %struct.unnamed_section = type { %struct.omp_clause_subcode, void (i8*)*, i8*, %struct.section* }
+ %struct.unnamed_section = type { %struct.omp_clause_subcode, ptr, ptr, ptr }
-define fastcc void @cse_insn(%struct.rtx_def* %insn, %struct.rtx_def* %libcall_insn, i16* %ptr, i1 %c1, i1 %c2, i1 %c3, i1 %c4, i1 %c5, i1 %c6, i1 %c7, i1 %c8, i1 %c9) nounwind {
+define fastcc void @cse_insn(ptr %insn, ptr %libcall_insn, ptr %ptr, i1 %c1, i1 %c2, i1 %c3, i1 %c4, i1 %c5, i1 %c6, i1 %c7, i1 %c8, i1 %c9) nounwind {
entry:
br i1 %c1, label %bb43, label %bb88
unreachable
bb107: ; preds = %bb88
- %0 = load i16, i16* %ptr, align 8 ; <i16> [#uses=1]
+ %0 = load i16, ptr %ptr, align 8 ; <i16> [#uses=1]
%1 = icmp eq i16 %0, 38 ; <i1> [#uses=1]
- %src_eqv_here.0 = select i1 %1, %struct.rtx_def* null, %struct.rtx_def* null ; <%struct.rtx_def*> [#uses=1]
+ %src_eqv_here.0 = select i1 %1, ptr null, ptr null ; <ptr> [#uses=1]
br i1 %c3, label %bb127, label %bb125
bb125: ; preds = %bb107
br i1 %c7, label %bb180, label %bb186
bb180: ; preds = %bb146
- %2 = icmp eq %struct.rtx_def* null, null ; <i1> [#uses=1]
+ %2 = icmp eq ptr null, null ; <i1> [#uses=1]
%3 = zext i1 %2 to i8 ; <i8> [#uses=1]
- %4 = icmp ne %struct.rtx_def* %src_eqv_here.0, null ; <i1> [#uses=1]
+ %4 = icmp ne ptr %src_eqv_here.0, null ; <i1> [#uses=1]
%5 = zext i1 %4 to i8 ; <i8> [#uses=1]
%toBool181 = icmp ne i8 %3, 0 ; <i1> [#uses=1]
%toBool182 = icmp ne i8 %5, 0 ; <i1> [#uses=1]
%call = call fast double @exp(double %x) #1
%pow = call fast double @llvm.pow.f64(double %call, double %y)
%C1 = fcmp ule double %call, %pow
- store i1 %C1, i1* %A
+ store i1 %C1, ptr %A
ret double %pow
}
declare ptr @foo()
declare i32 @memcmp(ptr inreg nocapture noundef, ptr inreg nocapture noundef, i32 inreg noundef)
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
declare double @exp2(double)
-declare i32 @__sprintf_chk(i8*, i32, i32, i8*, ...)
+declare i32 @__sprintf_chk(ptr, i32, i32, ptr, ...)
@a = common global [60 x i8] zeroinitializer, align 1
@b = common global [60 x i8] zeroinitializer, align 1
@h = constant [2 x i8] c"h\00"
; CHECK-NOT: declare noundef i32 @putchar(i32 noundef)
define void @test_fewer_params_than_num_register_parameters() {
- %fmt = getelementptr [2 x i8], [2 x i8]* @h, i32 0, i32 0
- call i32 (i8*, ...) @printf(i8* %fmt)
+ call i32 (ptr, ...) @printf(ptr @h)
ret void
}
; CHECK: declare noundef i32 @sprintf(ptr noalias nocapture noundef writeonly, ptr nocapture noundef readonly, ...)
; CHECK-NOT: declare noundef i32 @sprintf(ptr inreg noalias nocapture noundef writeonly, ptr inreg nocapture noundef readonly, ...)
define i32 @test_variadic() {
- %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
- %fmt = getelementptr inbounds [60 x i8], [60 x i8]* @b, i32 0, i32 0
- %ret = call i32 (i8*, i32, i32, i8*, ...) @__sprintf_chk(i8* %dst, i32 0, i32 -1, i8* %fmt)
+ %ret = call i32 (ptr, i32, i32, ptr, ...) @__sprintf_chk(ptr @a, i32 0, i32 -1, ptr @b)
ret i32 %ret
}
define float @test_instbased_f32() {
; CHECK-FLOAT-IN-VEC-LABEL: @test_instbased_f32(
-; CHECK-FLOAT-IN-VEC-NEXT: [[VAL:%.*]] = load float, float* @var32, align 4
+; CHECK-FLOAT-IN-VEC-NEXT: [[VAL:%.*]] = load float, ptr @var32, align 4
; CHECK-FLOAT-IN-VEC-NEXT: [[SINCOSPI:%.*]] = call <2 x float> @__sincospif_stret(float [[VAL]])
; CHECK-FLOAT-IN-VEC-NEXT: [[SINPI:%.*]] = extractelement <2 x float> [[SINCOSPI]], i64 0
; CHECK-FLOAT-IN-VEC-NEXT: [[COSPI:%.*]] = extractelement <2 x float> [[SINCOSPI]], i64 1
; CHECK-FLOAT-IN-VEC-NEXT: ret float [[RES]]
;
; CHECK-LABEL: @test_instbased_f32(
-; CHECK-NEXT: [[VAL:%.*]] = load float, float* @var32, align 4
+; CHECK-NEXT: [[VAL:%.*]] = load float, ptr @var32, align 4
; CHECK-NEXT: [[SINCOSPI:%.*]] = call { float, float } @__sincospif_stret(float [[VAL]])
; CHECK-NEXT: [[SINPI:%.*]] = extractvalue { float, float } [[SINCOSPI]], 0
; CHECK-NEXT: [[COSPI:%.*]] = extractvalue { float, float } [[SINCOSPI]], 1
; CHECK-NEXT: ret float [[RES]]
;
; CHECK-NO-SINCOS-LABEL: @test_instbased_f32(
-; CHECK-NO-SINCOS-NEXT: [[VAL:%.*]] = load float, float* @var32, align 4
+; CHECK-NO-SINCOS-NEXT: [[VAL:%.*]] = load float, ptr @var32, align 4
; CHECK-NO-SINCOS-NEXT: [[SIN:%.*]] = call float @__sinpif(float [[VAL]]) #[[ATTR0:[0-9]+]]
; CHECK-NO-SINCOS-NEXT: [[COS:%.*]] = call float @__cospif(float [[VAL]]) #[[ATTR0]]
; CHECK-NO-SINCOS-NEXT: [[RES:%.*]] = fadd float [[SIN]], [[COS]]
; CHECK-NO-SINCOS-NEXT: ret float [[RES]]
;
- %val = load float, float* @var32
+ %val = load float, ptr @var32
%sin = call float @__sinpif(float %val) #0
%cos = call float @__cospif(float %val) #0
%res = fadd float %sin, %cos
define double @test_instbased_f64() {
; CHECK-FLOAT-IN-VEC-LABEL: @test_instbased_f64(
-; CHECK-FLOAT-IN-VEC-NEXT: [[VAL:%.*]] = load double, double* @var64, align 8
+; CHECK-FLOAT-IN-VEC-NEXT: [[VAL:%.*]] = load double, ptr @var64, align 8
; CHECK-FLOAT-IN-VEC-NEXT: [[SINCOSPI:%.*]] = call { double, double } @__sincospi_stret(double [[VAL]])
; CHECK-FLOAT-IN-VEC-NEXT: [[SINPI:%.*]] = extractvalue { double, double } [[SINCOSPI]], 0
; CHECK-FLOAT-IN-VEC-NEXT: [[COSPI:%.*]] = extractvalue { double, double } [[SINCOSPI]], 1
; CHECK-FLOAT-IN-VEC-NEXT: ret double [[RES]]
;
; CHECK-LABEL: @test_instbased_f64(
-; CHECK-NEXT: [[VAL:%.*]] = load double, double* @var64, align 8
+; CHECK-NEXT: [[VAL:%.*]] = load double, ptr @var64, align 8
; CHECK-NEXT: [[SINCOSPI:%.*]] = call { double, double } @__sincospi_stret(double [[VAL]])
; CHECK-NEXT: [[SINPI:%.*]] = extractvalue { double, double } [[SINCOSPI]], 0
; CHECK-NEXT: [[COSPI:%.*]] = extractvalue { double, double } [[SINCOSPI]], 1
; CHECK-NEXT: ret double [[RES]]
;
; CHECK-NO-SINCOS-LABEL: @test_instbased_f64(
-; CHECK-NO-SINCOS-NEXT: [[VAL:%.*]] = load double, double* @var64, align 8
+; CHECK-NO-SINCOS-NEXT: [[VAL:%.*]] = load double, ptr @var64, align 8
; CHECK-NO-SINCOS-NEXT: [[SIN:%.*]] = call double @__sinpi(double [[VAL]]) #[[ATTR0]]
; CHECK-NO-SINCOS-NEXT: [[COS:%.*]] = call double @__cospi(double [[VAL]]) #[[ATTR0]]
; CHECK-NO-SINCOS-NEXT: [[RES:%.*]] = fadd double [[SIN]], [[COS]]
; CHECK-NO-SINCOS-NEXT: ret double [[RES]]
;
- %val = load double, double* @var64
+ %val = load double, ptr @var64
%sin = call double @__sinpi(double %val) #0
%cos = call double @__cospi(double %val) #0
%res = fadd double %sin, %cos
}
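; On targets providing __sincospi_stret, the paired @__sinpi/@__cospi calls on
; the same operand merge into one call returning both results (as a struct,
; or as a <2 x float> where float pairs are returned in a vector register),
; per the CHECK prefixes above; without it the two calls remain separate.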
-define double @test_fptr(double (double)* %fptr, double %p1) {
+define double @test_fptr(ptr %fptr, double %p1) {
; CHECK-FLOAT-IN-VEC-LABEL: @test_fptr(
; CHECK-FLOAT-IN-VEC-NEXT: [[SIN:%.*]] = call double @__sinpi(double [[P1:%.*]]) #[[ATTR0]]
; CHECK-FLOAT-IN-VEC-NEXT: [[COS:%.*]] = call double [[FPTR:%.*]](double [[P1]])
; Helper to generate branch conditions.
declare i1 @cond()
-declare i32* @use_and_return(i32*)
+declare ptr @use_and_return(ptr)
-declare i8* @llvm.stacksave() #0
+declare ptr @llvm.stacksave() #0
-declare void @llvm.stackrestore(i8*) #0
+declare void @llvm.stackrestore(ptr) #0
define void @foo(i32 %x) {
entry:
nonentry: ; preds = %entry
%argmem = alloca i32, i32 %x, align 4
- %sp = call i8* @llvm.stacksave()
+ %sp = call ptr @llvm.stacksave()
%c2 = call i1 @cond()
br i1 %c2, label %ret, label %sinktarget
sinktarget: ; preds = %nonentry
; Arrange for there to be a single use of %argmem by returning it.
- %p = call i32* @use_and_return(i32* nonnull %argmem)
- store i32 13, i32* %p, align 4
- call void @llvm.stackrestore(i8* %sp)
- %0 = call i32* @use_and_return(i32* %p)
+ %p = call ptr @use_and_return(ptr nonnull %argmem)
+ store i32 13, ptr %p, align 4
+ call void @llvm.stackrestore(ptr %sp)
+ %0 = call ptr @use_and_return(ptr %p)
br label %ret
ret: ; preds = %sinktarget, %nonentry, %entry
; CHECK-LABEL: define void @foo(i32 %x)
; CHECK: nonentry:
; CHECK: %argmem = alloca i32, i32 %x
-; CHECK: %sp = call i8* @llvm.stacksave()
+; CHECK: %sp = call ptr @llvm.stacksave()
; CHECK: %c2 = call i1 @cond()
; CHECK: br i1 %c2, label %ret, label %sinktarget
; CHECK: sinktarget:
-; CHECK: %p = call i32* @use_and_return(i32* nonnull %argmem)
-; CHECK: store i32 13, i32* %p
-; CHECK: call void @llvm.stackrestore(i8* %sp)
-; CHECK: %0 = call i32* @use_and_return(i32* nonnull %p)
+; CHECK: %p = call ptr @use_and_return(ptr nonnull %argmem)
+; CHECK: store i32 13, ptr %p
+; CHECK: call void @llvm.stackrestore(ptr %sp)
+; CHECK: %0 = call ptr @use_and_return(ptr nonnull %p)
attributes #0 = { nounwind }
%struct.B = type { i64, i64 }
-define void @test1(%struct.B* %p) personality i32 (...)* @__CxxFrameHandler3 {
+define void @test1(ptr %p) personality ptr @__CxxFrameHandler3 {
; CHECK-LABEL: @test1(
; CHECK-NEXT: invoke.cont:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast %struct.B* [[P:%.*]] to <2 x i64>*
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* [[TMP0]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr [[P:%.*]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i64> [[TMP1]], i64 0
; CHECK-NEXT: invoke void @throw()
; CHECK-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[CATCH_DISPATCH:%.*]]
; CHECK: catch.dispatch:
; CHECK-NEXT: [[CS:%.*]] = catchswitch within none [label %invoke.cont1] unwind label [[EHCLEANUP:%.*]]
; CHECK: invoke.cont1:
-; CHECK-NEXT: [[CATCH:%.*]] = catchpad within [[CS]] [i8* null, i32 64, i8* null]
+; CHECK-NEXT: [[CATCH:%.*]] = catchpad within [[CS]] [ptr null, i32 64, ptr null]
; CHECK-NEXT: invoke void @throw() [ "funclet"(token [[CATCH]]) ]
; CHECK-NEXT: to label [[UNREACHABLE]] unwind label [[EHCLEANUP]]
; CHECK: ehcleanup:
; CHECK-NEXT: unreachable
;
invoke.cont:
- %0 = bitcast %struct.B* %p to <2 x i64>*
- %1 = load <2 x i64>, <2 x i64>* %0, align 8
- %2 = extractelement <2 x i64> %1, i32 0
+ %0 = load <2 x i64>, ptr %p, align 8
+ %1 = extractelement <2 x i64> %0, i32 0
invoke void @throw()
to label %unreachable unwind label %catch.dispatch
catch.dispatch: ; preds = %invoke.cont
  %cs = catchswitch within none [label %invoke.cont1] unwind label %ehcleanup
invoke.cont1: ; preds = %catch.dispatch
- %catch = catchpad within %cs [i8* null, i32 64, i8* null]
+ %catch = catchpad within %cs [ptr null, i32 64, ptr null]
invoke void @throw() [ "funclet"(token %catch) ]
to label %unreachable unwind label %ehcleanup
ehcleanup: ; preds = %invoke.cont1, %catch.dispatch
- %phi = phi i64 [ %2, %catch.dispatch ], [ 9, %invoke.cont1 ]
+ %phi = phi i64 [ %1, %catch.dispatch ], [ 9, %invoke.cont1 ]
%cleanup = cleanuppad within none []
call void @release(i64 %phi) [ "funclet"(token %cleanup) ]
  cleanupret from %cleanup unwind to caller
unreachable: ; preds = %invoke.cont1, %invoke.cont
  unreachable
}
target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-pc-windows-msvc18.0.0"
-declare i32 @use(i8*)
+declare i32 @use(ptr)
; Should be able to sink the %ptr load to the %not.null block, which is the
; nearest common dominator (NCD) of %ptr's users.
-define i32 @test1(i8** %addr, i1 %c) {
+define i32 @test1(ptr %addr, i1 %c) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[PTR:%.*]] = load i8*, i8** [[ADDR:%.*]], align 8
+; CHECK-NEXT: [[PTR:%.*]] = load ptr, ptr [[ADDR:%.*]], align 8
; CHECK-NEXT: br i1 false, label [[NULL:%.*]], label [[NOT_NULL:%.*]]
; CHECK: null:
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: not.null:
-; CHECK-NEXT: [[Y:%.*]] = call i32 @use(i8* [[PTR]])
+; CHECK-NEXT: [[Y:%.*]] = call i32 @use(ptr [[PTR]])
; CHECK-NEXT: br i1 [[C:%.*]], label [[EXIT]], label [[NOT_NULL_2:%.*]]
; CHECK: not.null.2:
-; CHECK-NEXT: [[Z:%.*]] = call i32 @use(i8* [[PTR]])
+; CHECK-NEXT: [[Z:%.*]] = call i32 @use(ptr [[PTR]])
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: [[P:%.*]] = phi i32 [ poison, [[NULL]] ], [ [[Y]], [[NOT_NULL]] ], [ [[Z]], [[NOT_NULL_2]] ]
; CHECK-NEXT: ret i32 [[P]]
;
entry:
- %ptr = load i8*, i8** %addr
- %cond = icmp eq i8** %addr, null
+ %ptr = load ptr, ptr %addr
+ %cond = icmp eq ptr %addr, null
br i1 %cond, label %null, label %not.null
null:
- %x = call i32 @use(i8* null)
+ %x = call i32 @use(ptr null)
br label %exit
not.null:
- %y = call i32 @use(i8* %ptr)
+ %y = call i32 @use(ptr %ptr)
br i1 %c, label %exit, label %not.null.2
not.null.2:
- %z = call i32 @use(i8* %ptr)
+ %z = call i32 @use(ptr %ptr)
br label %exit
exit:
  %p = phi i32 [ %x, %null ], [ %y, %not.null ], [ %z, %not.null.2 ]
  ret i32 %p
}
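; Note on the checked output of @test1 above: the null check folds to
; 'br i1 false' (loading through %addr implies it is nonnull), so the %null
; arm is dead and its incoming phi value becomes poison.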
; Should be able to sink the %ptr load to the block that is the NCD of
; %ptr's users.
-define i32 @test2(i8** %addr, i1 %c) {
+define i32 @test2(ptr %addr, i1 %c) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[COND:%.*]] = icmp eq i8** [[ADDR:%.*]], null
+; CHECK-NEXT: [[COND:%.*]] = icmp eq ptr [[ADDR:%.*]], null
; CHECK-NEXT: br i1 [[COND]], label [[EXIT:%.*]], label [[LOAD_BB:%.*]]
; CHECK: load.bb:
-; CHECK-NEXT: [[PTR:%.*]] = load i8*, i8** [[ADDR]], align 8
+; CHECK-NEXT: [[PTR:%.*]] = load ptr, ptr [[ADDR]], align 8
; CHECK-NEXT: br i1 [[C:%.*]], label [[LEFT:%.*]], label [[RIGHT:%.*]]
; CHECK: left:
-; CHECK-NEXT: [[X:%.*]] = call i32 @use(i8* null)
+; CHECK-NEXT: [[X:%.*]] = call i32 @use(ptr null)
; CHECK-NEXT: br label [[EXIT]]
; CHECK: right:
-; CHECK-NEXT: [[Y:%.*]] = call i32 @use(i8* [[PTR]])
+; CHECK-NEXT: [[Y:%.*]] = call i32 @use(ptr [[PTR]])
; CHECK-NEXT: br i1 [[C]], label [[EXIT]], label [[RIGHT_2:%.*]]
; CHECK: right.2:
-; CHECK-NEXT: [[Z:%.*]] = call i32 @use(i8* [[PTR]])
+; CHECK-NEXT: [[Z:%.*]] = call i32 @use(ptr [[PTR]])
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: [[P:%.*]] = phi i32 [ [[X]], [[LEFT]] ], [ [[Y]], [[RIGHT]] ], [ [[Z]], [[RIGHT_2]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: ret i32 [[P]]
;
entry:
- %cond = icmp eq i8** %addr, null
+ %cond = icmp eq ptr %addr, null
br i1 %cond, label %exit, label %load.bb
load.bb:
- %ptr = load i8*, i8** %addr
+ %ptr = load ptr, ptr %addr
br i1 %c, label %left, label %right
left:
- %x = call i32 @use(i8* null)
+ %x = call i32 @use(ptr null)
br label %exit
right:
- %y = call i32 @use(i8* %ptr)
+ %y = call i32 @use(ptr %ptr)
br i1 %c, label %exit, label %right.2
right.2:
- %z = call i32 @use(i8* %ptr)
+ %z = call i32 @use(ptr %ptr)
br label %exit
exit:
  %p = phi i32 [ %x, %left ], [ %y, %right ], [ %z, %right.2 ], [ 0, %entry ]
  ret i32 %p
}
; Check that InstCombine can sink instructions to the landingpad of the invoke.
-define void @t0_noop(i32 %arg) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @t0_noop(i32 %arg) personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: @t0_noop(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[C:%.*]] = call i1 @cond()
; CHECK: invoke.cont:
; CHECK-NEXT: unreachable
; CHECK: lpad:
-; CHECK-NEXT: [[EH:%.*]] = landingpad { i8*, i32 }
+; CHECK-NEXT: [[EH:%.*]] = landingpad { ptr, i32 }
; CHECK-NEXT: cleanup
; CHECK-NEXT: [[V0:%.*]] = add i32 [[ARG:%.*]], 42
; CHECK-NEXT: call void @consume(i32 [[V0]])
; CHECK-NEXT: call void @destructor()
-; CHECK-NEXT: resume { i8*, i32 } [[EH]]
+; CHECK-NEXT: resume { ptr, i32 } [[EH]]
; CHECK: if.end:
; CHECK-NEXT: [[V1:%.*]] = add i32 [[ARG]], 24
; CHECK-NEXT: call void @consume(i32 [[V1]])
unreachable
lpad:
- %eh = landingpad { i8*, i32 } cleanup
+ %eh = landingpad { ptr, i32 } cleanup
call void @consume(i32 %v0)
call void @destructor()
- resume { i8*, i32 } %eh
+ resume { ptr, i32 } %eh
if.end:
call void @consume(i32 %v1)
  ret void
}
; FIXME: we could invert all uses of %i1 here
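; A hedged sketch of the inversion this FIXME has in mind (hypothetical IR,
; not current pass output): rebuild %i1 as its inverse and swap the arms of
; both selects, and the 'xor' disappears:
;   %i1.inv = icmp ne i32 %v0, %v1
;   %i2     = select i1 %i1.inv, i32 %v3, i32 %v2
;   %i4.inv = or i1 %i0, %i1.inv
;   %i5     = select i1 %i4.inv, i32 %v5, i32 %v4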
-define i32 @n4(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5, i32* %dst) {
+define i32 @n4(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5, ptr %dst) {
; CHECK-LABEL: @n4(
; CHECK-NEXT: [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
; CHECK-NEXT: [[I2:%.*]] = select i1 [[I1]], i32 [[V2:%.*]], i32 [[V3:%.*]]
-; CHECK-NEXT: store i32 [[I2]], i32* [[DST:%.*]], align 4
+; CHECK-NEXT: store i32 [[I2]], ptr [[DST:%.*]], align 4
; CHECK-NEXT: [[I3:%.*]] = xor i1 [[I0:%.*]], true
; CHECK-NEXT: [[I4:%.*]] = and i1 [[I1]], [[I3]]
; CHECK-NEXT: [[I5:%.*]] = select i1 [[I4]], i32 [[V4:%.*]], i32 [[V5:%.*]]
;
%i1 = icmp eq i32 %v0, %v1 ; has extra invertible use
%i2 = select i1 %i1, i32 %v2, i32 %v3 ; invertible use
- store i32 %i2, i32* %dst
+ store i32 %i2, ptr %dst
%i3 = xor i1 %i0, -1
%i4 = and i1 %i3, %i1
%i5 = select i1 %i4, i32 %v4, i32 %v5
  ret i32 %i5
}
; FIXME: we could invert all uses of %i1 here
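; (The same hypothetical inversion sketched above applies here; with the 'or'
; below, the inverted mask becomes 'and i1 %i0, %i1.inv' instead.)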
-define i32 @n4(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5, i32* %dst) {
+define i32 @n4(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5, ptr %dst) {
; CHECK-LABEL: @n4(
; CHECK-NEXT: [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
; CHECK-NEXT: [[I2:%.*]] = select i1 [[I1]], i32 [[V2:%.*]], i32 [[V3:%.*]]
-; CHECK-NEXT: store i32 [[I2]], i32* [[DST:%.*]], align 4
+; CHECK-NEXT: store i32 [[I2]], ptr [[DST:%.*]], align 4
; CHECK-NEXT: [[I3:%.*]] = xor i1 [[I0:%.*]], true
; CHECK-NEXT: [[I4:%.*]] = or i1 [[I1]], [[I3]]
; CHECK-NEXT: [[I5:%.*]] = select i1 [[I4]], i32 [[V4:%.*]], i32 [[V5:%.*]]
;
%i1 = icmp eq i32 %v0, %v1 ; has extra invertible use
%i2 = select i1 %i1, i32 %v2, i32 %v3 ; invertible use
- store i32 %i2, i32* %dst
+ store i32 %i2, ptr %dst
%i3 = xor i1 %i0, -1
%i4 = or i1 %i3, %i1
  %i5 = select i1 %i4, i32 %v4, i32 %v5
  ret i32 %i5
}
declare i32 @bar()
-define i32 @test3(i32* nocapture readonly %P, i32 %i) {
+define i32 @test3(ptr nocapture readonly %P, i32 %i) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: entry:
; CHECK-NEXT: switch i32 [[I:%.*]], label [[SW_EPILOG:%.*]] [
; CHECK-NEXT: ]
; CHECK: sw.bb:
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 [[IDXPROM]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[I]]
; CHECK-NEXT: br label [[SW_EPILOG]]
; CHECK: sw.epilog:
;
entry:
%idxprom = sext i32 %i to i64
- %arrayidx = getelementptr inbounds i32, i32* %P, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %P, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
switch i32 %i, label %sw.epilog [
i32 5, label %sw.bb
  i32 2, label %sw.bb
  ]
}
; Two uses in a single user (phi node). We just bail out.
-define i32 @test5(i32* nocapture readonly %P, i32 %i, i1 %cond) {
+define i32 @test5(ptr nocapture readonly %P, i32 %i, i1 %cond) {
; CHECK-LABEL: @test5(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[I:%.*]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 [[IDXPROM]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: br i1 [[COND:%.*]], label [[DISPATCHBB:%.*]], label [[SW_EPILOG:%.*]]
; CHECK: dispatchBB:
; CHECK-NEXT: [[ADD:%.*]] = shl nsw i32 [[I]], 1
;
entry:
%idxprom = sext i32 %i to i64
- %arrayidx = getelementptr inbounds i32, i32* %P, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %P, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
br i1 %cond, label %dispatchBB, label %sw.epilog
dispatchBB:
}
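; For illustration, a minimal hypothetical sketch of the two-uses-in-one-user
; pattern @test5 is about (the same loaded value feeding one phi twice):
;   %p = phi i32 [ %0, %bb1 ], [ %0, %bb2 ]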
; Multiple uses, but all from the same BB. We can sink.
-define i32 @test6(i32* nocapture readonly %P, i32 %i, i1 %cond) {
+define i32 @test6(ptr nocapture readonly %P, i32 %i, i1 %cond) {
; CHECK-LABEL: @test6(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD:%.*]] = shl nsw i32 [[I:%.*]], 1
; CHECK-NEXT: br label [[DISPATCHBB:%.*]]
; CHECK: dispatchBB:
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 [[IDXPROM]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: switch i32 [[I]], label [[SW_BB:%.*]] [
; CHECK-NEXT: i32 5, label [[SW_EPILOG:%.*]]
; CHECK-NEXT: i32 2, label [[SW_EPILOG]]
;
entry:
%idxprom = sext i32 %i to i64
- %arrayidx = getelementptr inbounds i32, i32* %P, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %P, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %i, %i
br label %dispatchBB
entry:
%x.addr = alloca float, align 4
%y.addr = alloca float, align 4
- store float %x, float* %x.addr, align 4
- store float %y, float* %y.addr, align 4
- %0 = load float, float* %x.addr, align 4
- %1 = load float, float* %x.addr, align 4
+ store float %x, ptr %x.addr, align 4
+ store float %y, ptr %y.addr, align 4
+ %0 = load float, ptr %x.addr, align 4
+ %1 = load float, ptr %x.addr, align 4
%mul = fmul fast float %0, %1
%2 = call float @llvm.sqrt.f32(float %mul)
  ret float %2
}
; Can't fold (fptrunc (sqrt (fpext x))) -> (sqrtf x) since there is another
; use of the sqrt result.
-define float @test3(float* %v) nounwind uwtable ssp {
+define float @test3(ptr %v) nounwind uwtable ssp {
; CHECK-LABEL: @test3(
; CHECK-NEXT: [[CALL34:%.*]] = call double @sqrt(double 0x7FF8000000000000) #[[ATTR4]]
; CHECK-NEXT: [[CALL36:%.*]] = call i32 @foo(double [[CALL34]]) #[[ATTR5:[0-9]+]]
; CHECK-NEXT: [[CONV38:%.*]] = fptrunc double [[CALL34]] to float
; CHECK-NEXT: ret float [[CONV38]]
;
- %arrayidx13 = getelementptr inbounds float, float* %v, i64 2
- %tmp14 = load float, float* %arrayidx13
+ %arrayidx13 = getelementptr inbounds float, ptr %v, i64 2
+ %tmp14 = load float, ptr %arrayidx13
%mul18 = fmul float %tmp14, %tmp14
%add19 = fadd float undef, %mul18
%conv = fpext float %add19 to double
; RUN: opt < %s -passes=instcombine
; PR2670
-@g_127 = external global i32 ; <i32*> [#uses=1]
+@g_127 = external global i32 ; <ptr> [#uses=1]
define i32 @func_56(i32 %p_58, i32 %p_59, i32 %p_61, i16 signext %p_62) nounwind {
entry:
%rem = srem i64 %or, 1 ; <i64> [#uses=1]
%cmp = icmp eq i64 %rem, 1 ; <i1> [#uses=1]
%cmp.ext = zext i1 %cmp to i32 ; <i32> [#uses=1]
- store i32 %cmp.ext, i32* @g_127
+ store i32 %cmp.ext, ptr @g_127
ret i32 undef
}
define void @foo() nounwind {
entry:
%src = alloca [1024 x i8], align 64
- %src1 = getelementptr [1024 x i8], [1024 x i8]* %src, i32 0, i32 0
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 32 getelementptr inbounds ([1024 x i8], [1024 x i8]* @dst, i32 0, i32 0), i8* align 32 %src1, i32 1024, i1 false)
- call void @frob(i8* %src1) nounwind
+ call void @llvm.memcpy.p0.p0.i32(ptr align 32 @dst, ptr align 32 %src, i32 1024, i1 false)
+ call void @frob(ptr %src) nounwind
ret void
}
-declare void @frob(i8*)
+declare void @frob(ptr)
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
; PR37713
; RUN: opt -passes=instcombine %s -S | FileCheck %s
-declare i8* @llvm.stacksave() #0
-declare void @llvm.stackrestore(i8*) #0
+declare ptr @llvm.stacksave() #0
+declare void @llvm.stackrestore(ptr) #0
-define i32* @test1(i32 %P) !dbg !6 {
+define ptr @test1(i32 %P) !dbg !6 {
; CHECK-LABEL: @test1(
-; CHECK-NEXT: call void @llvm.dbg.value(metadata i8* undef
+; CHECK-NEXT: call void @llvm.dbg.value(metadata ptr undef
; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[P:%.*]] to i64, !dbg !13
; CHECK-NEXT: [[A:%.*]] = alloca i32, i64 [[TMP1]], align 4, !dbg !13
-; CHECK-NEXT: call void @llvm.dbg.value(metadata i32* [[A]]
-; CHECK-NEXT: ret i32* [[A]], !dbg !14
+; CHECK-NEXT: call void @llvm.dbg.value(metadata ptr [[A]]
+; CHECK-NEXT: ret ptr [[A]], !dbg !14
;
- %tmp = call i8* @llvm.stacksave(), !dbg !12
- call void @llvm.dbg.value(metadata i8* %tmp, metadata !9, metadata !DIExpression()), !dbg !12
- call void @llvm.stackrestore(i8* %tmp), !dbg !13
+ %tmp = call ptr @llvm.stacksave(), !dbg !12
+ call void @llvm.dbg.value(metadata ptr %tmp, metadata !9, metadata !DIExpression()), !dbg !12
+ call void @llvm.stackrestore(ptr %tmp), !dbg !13
%A = alloca i32, i32 %P, !dbg !14
- call void @llvm.dbg.value(metadata i32* %A, metadata !11, metadata !DIExpression()), !dbg !14
- ret i32* %A, !dbg !15
+ call void @llvm.dbg.value(metadata ptr %A, metadata !11, metadata !DIExpression()), !dbg !14
+ ret ptr %A, !dbg !15
}
declare void @llvm.dbg.value(metadata, metadata, metadata) #1
@glob = global i32 0
-declare i8* @llvm.stacksave()
-declare void @llvm.stackrestore(i8*)
+declare ptr @llvm.stacksave()
+declare void @llvm.stackrestore(ptr)
;; Test that llvm.stackrestore is removed when possible.
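;; Two removable patterns follow: a restore of a stack pointer that was just
;; saved with no intervening alloca (@test1), and a restore with no alloca
;; between it and the return (@test2).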
-define i32* @test1(i32 %P) {
- %tmp = call i8* @llvm.stacksave( )
- call void @llvm.stackrestore( i8* %tmp ) ;; not restoring anything
+define ptr @test1(i32 %P) {
+ %tmp = call ptr @llvm.stacksave( )
+ call void @llvm.stackrestore( ptr %tmp ) ;; not restoring anything
%A = alloca i32, i32 %P
- ret i32* %A
+ ret ptr %A
}
-; CHECK-LABEL: define i32* @test1(
+; CHECK-LABEL: define ptr @test1(
; CHECK-NOT: call void @llvm.stackrestore
-; CHECK: ret i32*
+; CHECK: ret ptr
-define void @test2(i8* %X) {
- call void @llvm.stackrestore( i8* %X ) ;; no allocas before return.
+define void @test2(ptr %X) {
+ call void @llvm.stackrestore( ptr %X ) ;; no allocas before return.
ret void
}
bb: ; preds = %bb, %bb.preheader
%i.0.reg2mem.0 = phi i32 [ 0, %bb.preheader ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
- %tmp = call i8* @llvm.stacksave( ) ; <i8*> [#uses=1]
- %tmp23 = alloca i8, i32 %size ; <i8*> [#uses=2]
- %tmp27 = getelementptr i8, i8* %tmp23, i32 %tmp25 ; <i8*> [#uses=1]
- store i8 0, i8* %tmp27, align 1
- %tmp28 = call i8* @llvm.stacksave( ) ; <i8*> [#uses=1]
- %tmp52 = alloca i8, i32 %size ; <i8*> [#uses=1]
- %tmp53 = call i8* @llvm.stacksave( ) ; <i8*> [#uses=1]
- %tmp77 = alloca i8, i32 %size ; <i8*> [#uses=1]
- %tmp78 = call i8* @llvm.stacksave( ) ; <i8*> [#uses=1]
- %tmp102 = alloca i8, i32 %size ; <i8*> [#uses=1]
- call void @bar( i32 %i.0.reg2mem.0, i8* %tmp23, i8* %tmp52, i8* %tmp77, i8* %tmp102, i32 %size ) nounwind
- call void @llvm.stackrestore( i8* %tmp78 )
- call void @llvm.stackrestore( i8* %tmp53 )
- call void @llvm.stackrestore( i8* %tmp28 )
- call void @llvm.stackrestore( i8* %tmp )
+ %tmp = call ptr @llvm.stacksave( ) ; <ptr> [#uses=1]
+ %tmp23 = alloca i8, i32 %size ; <ptr> [#uses=2]
+ %tmp27 = getelementptr i8, ptr %tmp23, i32 %tmp25 ; <ptr> [#uses=1]
+ store i8 0, ptr %tmp27, align 1
+ %tmp28 = call ptr @llvm.stacksave( ) ; <ptr> [#uses=1]
+ %tmp52 = alloca i8, i32 %size ; <ptr> [#uses=1]
+ %tmp53 = call ptr @llvm.stacksave( ) ; <ptr> [#uses=1]
+ %tmp77 = alloca i8, i32 %size ; <ptr> [#uses=1]
+ %tmp78 = call ptr @llvm.stacksave( ) ; <ptr> [#uses=1]
+ %tmp102 = alloca i8, i32 %size ; <ptr> [#uses=1]
+ call void @bar( i32 %i.0.reg2mem.0, ptr %tmp23, ptr %tmp52, ptr %tmp77, ptr %tmp102, i32 %size ) nounwind
+ call void @llvm.stackrestore( ptr %tmp78 )
+ call void @llvm.stackrestore( ptr %tmp53 )
+ call void @llvm.stackrestore( ptr %tmp28 )
+ call void @llvm.stackrestore( ptr %tmp )
%indvar.next = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %indvar.next, %smax ; <i1> [#uses=1]
br i1 %exitcond, label %return, label %bb
return:
  ret void
}
; CHECK-LABEL: define void @foo(
-; CHECK: %tmp = call i8* @llvm.stacksave()
+; CHECK: %tmp = call ptr @llvm.stacksave()
; CHECK: alloca i8
; CHECK-NOT: stacksave
; CHECK: call void @bar(
-; CHECK-NEXT: call void @llvm.stackrestore(i8* %tmp)
+; CHECK-NEXT: call void @llvm.stackrestore(ptr %tmp)
; CHECK: ret void
-declare void @bar(i32, i8*, i8*, i8*, i8*, i32)
+declare void @bar(i32, ptr, ptr, ptr, ptr, i32)
-declare void @inalloca_callee(i32* inalloca(i32))
+declare void @inalloca_callee(ptr inalloca(i32))
define void @test3(i32 %c) {
entry:
  br label %loop
loop:
%i = phi i32 [0, %entry], [%i1, %loop]
- %save1 = call i8* @llvm.stacksave()
+ %save1 = call ptr @llvm.stacksave()
%argmem = alloca inalloca i32
- store i32 0, i32* %argmem
- call void @inalloca_callee(i32* inalloca(i32) %argmem)
+ store i32 0, ptr %argmem
+ call void @inalloca_callee(ptr inalloca(i32) %argmem)
; This restore cannot be deleted; the restore below does not make it dead.
- call void @llvm.stackrestore(i8* %save1)
+ call void @llvm.stackrestore(ptr %save1)
; FIXME: We should be able to remove this save/restore pair, but we don't.
- %save2 = call i8* @llvm.stacksave()
- store i32 0, i32* @glob
- call void @llvm.stackrestore(i8* %save2)
+ %save2 = call ptr @llvm.stacksave()
+ store i32 0, ptr @glob
+ call void @llvm.stackrestore(ptr %save2)
%i1 = add i32 1, %i
%done = icmp eq i32 %i1, %c
  br i1 %done, label %loop, label %return
return:
  ret void
}
; CHECK-LABEL: define void @test3(
; CHECK: loop:
; CHECK: %i = phi i32 [ 0, %entry ], [ %i1, %loop ]
-; CHECK: %save1 = call i8* @llvm.stacksave()
+; CHECK: %save1 = call ptr @llvm.stacksave()
; CHECK: %argmem = alloca inalloca i32
-; CHECK: store i32 0, i32* %argmem
-; CHECK: call void @inalloca_callee(i32* {{.*}} inalloca(i32) %argmem)
-; CHECK: call void @llvm.stackrestore(i8* %save1)
+; CHECK: store i32 0, ptr %argmem
+; CHECK: call void @inalloca_callee(ptr {{.*}} inalloca(i32) %argmem)
+; CHECK: call void @llvm.stackrestore(ptr %save1)
; CHECK: br i1 %done, label %loop, label %return
; CHECK: ret void
-define i32 @test4(i32 %m, i32* %a, i32* %b) {
+define i32 @test4(i32 %m, ptr %a, ptr %b) {
entry:
br label %for.body
for.body:
%x.012 = phi i32 [ 0, %entry ], [ %add2, %for.body ]
%i.011 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %0 = call i8* @llvm.stacksave()
- %load1 = load i32, i32* %a, align 4
+ %0 = call ptr @llvm.stacksave()
+ %load1 = load i32, ptr %a, align 4
%mul1 = mul nsw i32 %load1, %m
%add1 = add nsw i32 %mul1, %x.012
- call void @llvm.stackrestore(i8* %0)
- %load2 = load i32, i32* %b, align 4
+ call void @llvm.stackrestore(ptr %0)
+ %load2 = load i32, ptr %b, align 4
%mul2 = mul nsw i32 %load2, %m
%add2 = add nsw i32 %mul2, %add1
- call void @llvm.stackrestore(i8* %0)
+ call void @llvm.stackrestore(ptr %0)
%inc = add nuw nsw i32 %i.011, 1
%exitcond.not = icmp eq i32 %inc, 100
  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
for.cond.cleanup:
  ret i32 %add2
}
; Test elimination of redundant "gc-live" bundle entries and gc.relocate
; calls for the base and derived pointers being relocated at a statepoint.
-declare i32* @fake_personality_function()
+declare ptr @fake_personality_function()
declare void @func()
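; For reference: the two i32 operands of llvm.experimental.gc.relocate are
; indices into the statepoint's "gc-live" operand bundle, naming the base and
; the derived pointer respectively, which is why duplicate bundle entries
; make several of the relocates below equivalent.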
-define void @test(i32 addrspace(1)* %b) gc "statepoint-example" {
+define void @test(ptr addrspace(1) %b) gc "statepoint-example" {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[D:%.*]] = getelementptr i32, i32 addrspace(1)* [[B:%.*]], i64 16
-; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i32 addrspace(1)* [[B]], i32 addrspace(1)* [[D]]) ]
-; CHECK-NEXT: [[B_NEW_1:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
-; CHECK-NEXT: [[B_NEW_2:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
-; CHECK-NEXT: [[D_NEW_1:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 1)
-; CHECK-NEXT: [[D_NEW_2:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 1)
-; CHECK-NEXT: [[D_NEW_3:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 1)
-; CHECK-NEXT: [[D_NEW_4:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 1)
-; CHECK-NEXT: store i32 1, i32 addrspace(1)* [[B_NEW_1]], align 4
-; CHECK-NEXT: store i32 1, i32 addrspace(1)* [[B_NEW_2]], align 4
-; CHECK-NEXT: store i32 1, i32 addrspace(1)* [[D_NEW_1]], align 4
-; CHECK-NEXT: store i32 1, i32 addrspace(1)* [[D_NEW_2]], align 4
-; CHECK-NEXT: store i32 1, i32 addrspace(1)* [[D_NEW_3]], align 4
-; CHECK-NEXT: store i32 1, i32 addrspace(1)* [[D_NEW_4]], align 4
+; CHECK-NEXT: [[D:%.*]] = getelementptr i32, ptr addrspace(1) [[B:%.*]], i64 16
+; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) [[B]], ptr addrspace(1) [[D]]) ]
+; CHECK-NEXT: [[B_NEW_1:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
+; CHECK-NEXT: [[B_NEW_2:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
+; CHECK-NEXT: [[D_NEW_1:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 1)
+; CHECK-NEXT: [[D_NEW_2:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 1)
+; CHECK-NEXT: [[D_NEW_3:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 1)
+; CHECK-NEXT: [[D_NEW_4:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 1)
+; CHECK-NEXT: store i32 1, ptr addrspace(1) [[B_NEW_1]], align 4
+; CHECK-NEXT: store i32 1, ptr addrspace(1) [[B_NEW_2]], align 4
+; CHECK-NEXT: store i32 1, ptr addrspace(1) [[D_NEW_1]], align 4
+; CHECK-NEXT: store i32 1, ptr addrspace(1) [[D_NEW_2]], align 4
+; CHECK-NEXT: store i32 1, ptr addrspace(1) [[D_NEW_3]], align 4
+; CHECK-NEXT: store i32 1, ptr addrspace(1) [[D_NEW_4]], align 4
; CHECK-NEXT: ret void
;
entry:
- %d = getelementptr i32, i32 addrspace(1)* %b, i64 16
- %safepoint_token = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live" (i32 addrspace(1)* %b, i32 addrspace(1)* %b, i32 addrspace(1)* %d, i32 addrspace(1)* %d)]
- %b.new.1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 0)
- %b.new.2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 1)
- %d.new.1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 2)
- %d.new.2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 3)
- %d.new.3 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 1, i32 2)
- %d.new.4 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 1, i32 3)
- store i32 1, i32 addrspace(1)* %b.new.1
- store i32 1, i32 addrspace(1)* %b.new.2
- store i32 1, i32 addrspace(1)* %d.new.1
- store i32 1, i32 addrspace(1)* %d.new.2
- store i32 1, i32 addrspace(1)* %d.new.3
- store i32 1, i32 addrspace(1)* %d.new.4
+ %d = getelementptr i32, ptr addrspace(1) %b, i64 16
+ %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live" (ptr addrspace(1) %b, ptr addrspace(1) %b, ptr addrspace(1) %d, ptr addrspace(1) %d)]
+ %b.new.1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0)
+ %b.new.2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 1)
+ %d.new.1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 2)
+ %d.new.2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 3)
+ %d.new.3 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 1, i32 2)
+ %d.new.4 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 1, i32 3)
+ store i32 1, ptr addrspace(1) %b.new.1
+ store i32 1, ptr addrspace(1) %b.new.2
+ store i32 1, ptr addrspace(1) %d.new.1
+ store i32 1, ptr addrspace(1) %d.new.2
+ store i32 1, ptr addrspace(1) %d.new.3
+ store i32 1, ptr addrspace(1) %d.new.4
ret void
}
-define void @test_no_derived_use(i32 addrspace(1)* %b) gc "statepoint-example" {
+define void @test_no_derived_use(ptr addrspace(1) %b) gc "statepoint-example" {
; CHECK-LABEL: @test_no_derived_use(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i32 addrspace(1)* [[B:%.*]]) ]
-; CHECK-NEXT: [[B_NEW_1:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
-; CHECK-NEXT: store i32 1, i32 addrspace(1)* [[B_NEW_1]], align 4
+; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) [[B:%.*]]) ]
+; CHECK-NEXT: [[B_NEW_1:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
+; CHECK-NEXT: store i32 1, ptr addrspace(1) [[B_NEW_1]], align 4
; CHECK-NEXT: ret void
;
entry:
- %d = getelementptr i32, i32 addrspace(1)* %b, i64 16
- %safepoint_token = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live" (i32 addrspace(1)* %b, i32 addrspace(1)* %b, i32 addrspace(1)* %d, i32 addrspace(1)* %d)]
- %b.new.1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 0)
- %b.new.2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 1)
- %d.new.1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 2)
- %d.new.2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 3)
- %d.new.3 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 1, i32 2)
- %d.new.4 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 1, i32 3)
- store i32 1, i32 addrspace(1)* %b.new.1
+ %d = getelementptr i32, ptr addrspace(1) %b, i64 16
+ %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live" (ptr addrspace(1) %b, ptr addrspace(1) %b, ptr addrspace(1) %d, ptr addrspace(1) %d)]
+ %b.new.1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0)
+ %b.new.2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 1)
+ %d.new.1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 2)
+ %d.new.2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 3)
+ %d.new.3 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 1, i32 2)
+ %d.new.4 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 1, i32 3)
+ store i32 1, ptr addrspace(1) %b.new.1
ret void
}
-define void @test_no_base_use(i32 addrspace(1)* %b) gc "statepoint-example" {
+define void @test_no_base_use(ptr addrspace(1) %b) gc "statepoint-example" {
; CHECK-LABEL: @test_no_base_use(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[D:%.*]] = getelementptr i32, i32 addrspace(1)* [[B:%.*]], i64 16
-; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i32 addrspace(1)* [[B]], i32 addrspace(1)* [[D]]) ]
-; CHECK-NEXT: [[D_NEW_1:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 1)
-; CHECK-NEXT: store i32 1, i32 addrspace(1)* [[D_NEW_1]], align 4
+; CHECK-NEXT: [[D:%.*]] = getelementptr i32, ptr addrspace(1) [[B:%.*]], i64 16
+; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) [[B]], ptr addrspace(1) [[D]]) ]
+; CHECK-NEXT: [[D_NEW_1:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 1)
+; CHECK-NEXT: store i32 1, ptr addrspace(1) [[D_NEW_1]], align 4
; CHECK-NEXT: ret void
;
entry:
- %d = getelementptr i32, i32 addrspace(1)* %b, i64 16
- %safepoint_token = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live" (i32 addrspace(1)* %b, i32 addrspace(1)* %b, i32 addrspace(1)* %d, i32 addrspace(1)* %d)]
- %b.new.1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 0)
- %b.new.2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 1)
- %d.new.1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 2)
- %d.new.2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 3)
- %d.new.3 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 1, i32 2)
- %d.new.4 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 1, i32 3)
- store i32 1, i32 addrspace(1)* %d.new.1
+ %d = getelementptr i32, ptr addrspace(1) %b, i64 16
+ %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live" (ptr addrspace(1) %b, ptr addrspace(1) %b, ptr addrspace(1) %d, ptr addrspace(1) %d)]
+ %b.new.1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0)
+ %b.new.2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 1)
+ %d.new.1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 2)
+ %d.new.2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 3)
+ %d.new.3 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 1, i32 2)
+ %d.new.4 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 1, i32 3)
+ store i32 1, ptr addrspace(1) %d.new.1
ret void
}
-define void @test_invoke(i32 addrspace(1)* %b) gc "statepoint-example" personality i32* ()* @fake_personality_function {
+define void @test_invoke(ptr addrspace(1) %b) gc "statepoint-example" personality ptr @fake_personality_function {
; CHECK-LABEL: @test_invoke(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[D:%.*]] = getelementptr i32, i32 addrspace(1)* [[B:%.*]], i64 16
-; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = invoke token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i32 addrspace(1)* [[B]], i32 addrspace(1)* [[D]]) ]
+; CHECK-NEXT: [[D:%.*]] = getelementptr i32, ptr addrspace(1) [[B:%.*]], i64 16
+; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = invoke token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) [[B]], ptr addrspace(1) [[D]]) ]
; CHECK-NEXT: to label [[NORMAL_DEST:%.*]] unwind label [[UNWIND_DEST:%.*]]
; CHECK: normal_dest:
-; CHECK-NEXT: [[B_NEW_1:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
-; CHECK-NEXT: [[B_NEW_2:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
-; CHECK-NEXT: [[D_NEW_1:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 1)
-; CHECK-NEXT: [[D_NEW_2:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 1)
-; CHECK-NEXT: [[D_NEW_3:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 1)
-; CHECK-NEXT: [[D_NEW_4:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 1)
-; CHECK-NEXT: store i32 1, i32 addrspace(1)* [[B_NEW_1]], align 4
-; CHECK-NEXT: store i32 1, i32 addrspace(1)* [[B_NEW_2]], align 4
-; CHECK-NEXT: store i32 1, i32 addrspace(1)* [[D_NEW_1]], align 4
-; CHECK-NEXT: store i32 1, i32 addrspace(1)* [[D_NEW_2]], align 4
-; CHECK-NEXT: store i32 1, i32 addrspace(1)* [[D_NEW_3]], align 4
-; CHECK-NEXT: store i32 1, i32 addrspace(1)* [[D_NEW_4]], align 4
+; CHECK-NEXT: [[B_NEW_1:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
+; CHECK-NEXT: [[B_NEW_2:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
+; CHECK-NEXT: [[D_NEW_1:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 1)
+; CHECK-NEXT: [[D_NEW_2:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 1)
+; CHECK-NEXT: [[D_NEW_3:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 1)
+; CHECK-NEXT: [[D_NEW_4:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 1)
+; CHECK-NEXT: store i32 1, ptr addrspace(1) [[B_NEW_1]], align 4
+; CHECK-NEXT: store i32 1, ptr addrspace(1) [[B_NEW_2]], align 4
+; CHECK-NEXT: store i32 1, ptr addrspace(1) [[D_NEW_1]], align 4
+; CHECK-NEXT: store i32 1, ptr addrspace(1) [[D_NEW_2]], align 4
+; CHECK-NEXT: store i32 1, ptr addrspace(1) [[D_NEW_3]], align 4
+; CHECK-NEXT: store i32 1, ptr addrspace(1) [[D_NEW_4]], align 4
; CHECK-NEXT: ret void
; CHECK: unwind_dest:
; CHECK-NEXT: [[LPAD:%.*]] = landingpad token
; CHECK-NEXT: cleanup
-; CHECK-NEXT: [[LPB_NEW_1:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[LPAD]], i32 0, i32 0)
-; CHECK-NEXT: [[LPB_NEW_2:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[LPAD]], i32 0, i32 0)
-; CHECK-NEXT: [[LPD_NEW_1:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[LPAD]], i32 0, i32 1)
-; CHECK-NEXT: [[LPD_NEW_2:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[LPAD]], i32 0, i32 1)
-; CHECK-NEXT: [[LPD_NEW_3:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[LPAD]], i32 0, i32 1)
-; CHECK-NEXT: [[LPD_NEW_4:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[LPAD]], i32 0, i32 1)
-; CHECK-NEXT: store i32 2, i32 addrspace(1)* [[LPB_NEW_1]], align 4
-; CHECK-NEXT: store i32 2, i32 addrspace(1)* [[LPB_NEW_2]], align 4
-; CHECK-NEXT: store i32 2, i32 addrspace(1)* [[LPD_NEW_1]], align 4
-; CHECK-NEXT: store i32 2, i32 addrspace(1)* [[LPD_NEW_2]], align 4
-; CHECK-NEXT: store i32 2, i32 addrspace(1)* [[LPD_NEW_3]], align 4
-; CHECK-NEXT: store i32 2, i32 addrspace(1)* [[LPD_NEW_4]], align 4
+; CHECK-NEXT: [[LPB_NEW_1:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[LPAD]], i32 0, i32 0)
+; CHECK-NEXT: [[LPB_NEW_2:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[LPAD]], i32 0, i32 0)
+; CHECK-NEXT: [[LPD_NEW_1:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[LPAD]], i32 0, i32 1)
+; CHECK-NEXT: [[LPD_NEW_2:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[LPAD]], i32 0, i32 1)
+; CHECK-NEXT: [[LPD_NEW_3:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[LPAD]], i32 0, i32 1)
+; CHECK-NEXT: [[LPD_NEW_4:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[LPAD]], i32 0, i32 1)
+; CHECK-NEXT: store i32 2, ptr addrspace(1) [[LPB_NEW_1]], align 4
+; CHECK-NEXT: store i32 2, ptr addrspace(1) [[LPB_NEW_2]], align 4
+; CHECK-NEXT: store i32 2, ptr addrspace(1) [[LPD_NEW_1]], align 4
+; CHECK-NEXT: store i32 2, ptr addrspace(1) [[LPD_NEW_2]], align 4
+; CHECK-NEXT: store i32 2, ptr addrspace(1) [[LPD_NEW_3]], align 4
+; CHECK-NEXT: store i32 2, ptr addrspace(1) [[LPD_NEW_4]], align 4
; CHECK-NEXT: ret void
;
entry:
- %d = getelementptr i32, i32 addrspace(1)* %b, i64 16
- %safepoint_token = invoke token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live" (i32 addrspace(1)* %b, i32 addrspace(1)* %b, i32 addrspace(1)* %d, i32 addrspace(1)* %d)]
+ %d = getelementptr i32, ptr addrspace(1) %b, i64 16
+ %safepoint_token = invoke token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live" (ptr addrspace(1) %b, ptr addrspace(1) %b, ptr addrspace(1) %d, ptr addrspace(1) %d)]
to label %normal_dest unwind label %unwind_dest
normal_dest:
- %b.new.1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 0)
- %b.new.2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 1)
- %d.new.1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 2)
- %d.new.2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 3)
- %d.new.3 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 1, i32 2)
- %d.new.4 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 1, i32 3)
- store i32 1, i32 addrspace(1)* %b.new.1
- store i32 1, i32 addrspace(1)* %b.new.2
- store i32 1, i32 addrspace(1)* %d.new.1
- store i32 1, i32 addrspace(1)* %d.new.2
- store i32 1, i32 addrspace(1)* %d.new.3
- store i32 1, i32 addrspace(1)* %d.new.4
+ %b.new.1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0)
+ %b.new.2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 1)
+ %d.new.1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 2)
+ %d.new.2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 3)
+ %d.new.3 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 1, i32 2)
+ %d.new.4 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 1, i32 3)
+ store i32 1, ptr addrspace(1) %b.new.1
+ store i32 1, ptr addrspace(1) %b.new.2
+ store i32 1, ptr addrspace(1) %d.new.1
+ store i32 1, ptr addrspace(1) %d.new.2
+ store i32 1, ptr addrspace(1) %d.new.3
+ store i32 1, ptr addrspace(1) %d.new.4
ret void
unwind_dest:
%lpad = landingpad token
cleanup
- %lpb.new.1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %lpad, i32 0, i32 0)
- %lpb.new.2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %lpad, i32 0, i32 1)
- %lpd.new.1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %lpad, i32 0, i32 2)
- %lpd.new.2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %lpad, i32 0, i32 3)
- %lpd.new.3 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %lpad, i32 1, i32 2)
- %lpd.new.4 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %lpad, i32 1, i32 3)
- store i32 2, i32 addrspace(1)* %lpb.new.1
- store i32 2, i32 addrspace(1)* %lpb.new.2
- store i32 2, i32 addrspace(1)* %lpd.new.1
- store i32 2, i32 addrspace(1)* %lpd.new.2
- store i32 2, i32 addrspace(1)* %lpd.new.3
- store i32 2, i32 addrspace(1)* %lpd.new.4
+ %lpb.new.1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %lpad, i32 0, i32 0)
+ %lpb.new.2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %lpad, i32 0, i32 1)
+ %lpd.new.1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %lpad, i32 0, i32 2)
+ %lpd.new.2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %lpad, i32 0, i32 3)
+ %lpd.new.3 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %lpad, i32 1, i32 2)
+ %lpd.new.4 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %lpad, i32 1, i32 3)
+ store i32 2, ptr addrspace(1) %lpb.new.1
+ store i32 2, ptr addrspace(1) %lpb.new.2
+ store i32 2, ptr addrspace(1) %lpd.new.1
+ store i32 2, ptr addrspace(1) %lpd.new.2
+ store i32 2, ptr addrspace(1) %lpd.new.3
+ store i32 2, ptr addrspace(1) %lpd.new.4
ret void
}
-define void @test_no_derived_use_invoke(i32 addrspace(1)* %b) gc "statepoint-example" personality i32* ()* @fake_personality_function {
+define void @test_no_derived_use_invoke(ptr addrspace(1) %b) gc "statepoint-example" personality ptr @fake_personality_function {
; CHECK-LABEL: @test_no_derived_use_invoke(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = invoke token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i32 addrspace(1)* [[B:%.*]]) ]
+; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = invoke token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) [[B:%.*]]) ]
; CHECK-NEXT: to label [[NORMAL_DEST:%.*]] unwind label [[UNWIND_DEST:%.*]]
; CHECK: normal_dest:
-; CHECK-NEXT: [[B_NEW_1:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
-; CHECK-NEXT: store i32 1, i32 addrspace(1)* [[B_NEW_1]], align 4
+; CHECK-NEXT: [[B_NEW_1:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
+; CHECK-NEXT: store i32 1, ptr addrspace(1) [[B_NEW_1]], align 4
; CHECK-NEXT: ret void
; CHECK: unwind_dest:
; CHECK-NEXT: [[LPAD:%.*]] = landingpad token
; CHECK-NEXT: cleanup
-; CHECK-NEXT: [[LPB_NEW_1:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[LPAD]], i32 0, i32 0)
-; CHECK-NEXT: store i32 2, i32 addrspace(1)* [[LPB_NEW_1]], align 4
+; CHECK-NEXT: [[LPB_NEW_1:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[LPAD]], i32 0, i32 0)
+; CHECK-NEXT: store i32 2, ptr addrspace(1) [[LPB_NEW_1]], align 4
; CHECK-NEXT: ret void
;
entry:
- %d = getelementptr i32, i32 addrspace(1)* %b, i64 16
- %safepoint_token = invoke token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live" (i32 addrspace(1)* %b, i32 addrspace(1)* %b, i32 addrspace(1)* %d, i32 addrspace(1)* %d)]
+ %d = getelementptr i32, ptr addrspace(1) %b, i64 16
+ %safepoint_token = invoke token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live" (ptr addrspace(1) %b, ptr addrspace(1) %b, ptr addrspace(1) %d, ptr addrspace(1) %d)]
to label %normal_dest unwind label %unwind_dest
normal_dest:
- %b.new.1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 0)
- %b.new.2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 1)
- %d.new.1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 2)
- %d.new.2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 3)
- %d.new.3 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 1, i32 2)
- %d.new.4 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 1, i32 3)
- store i32 1, i32 addrspace(1)* %b.new.1
+ %b.new.1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0)
+ %b.new.2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 1)
+ %d.new.1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 2)
+ %d.new.2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 3)
+ %d.new.3 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 1, i32 2)
+ %d.new.4 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 1, i32 3)
+ store i32 1, ptr addrspace(1) %b.new.1
ret void
unwind_dest:
%lpad = landingpad token
cleanup
- %lpb.new.1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %lpad, i32 0, i32 0)
- %lpb.new.2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %lpad, i32 0, i32 1)
- %lpd.new.1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %lpad, i32 0, i32 2)
- %lpd.new.2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %lpad, i32 0, i32 3)
- %lpd.new.3 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %lpad, i32 1, i32 2)
- %lpd.new.4 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %lpad, i32 1, i32 3)
- store i32 2, i32 addrspace(1)* %lpb.new.1
+ %lpb.new.1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %lpad, i32 0, i32 0)
+ %lpb.new.2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %lpad, i32 0, i32 1)
+ %lpd.new.1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %lpad, i32 0, i32 2)
+ %lpd.new.2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %lpad, i32 0, i32 3)
+ %lpd.new.3 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %lpad, i32 1, i32 2)
+ %lpd.new.4 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %lpad, i32 1, i32 3)
+ store i32 2, ptr addrspace(1) %lpb.new.1
ret void
}
-define void @test_no_base_use_invoke(i32 addrspace(1)* %b) gc "statepoint-example" personality i32* ()* @fake_personality_function {
+define void @test_no_base_use_invoke(ptr addrspace(1) %b) gc "statepoint-example" personality ptr @fake_personality_function {
; CHECK-LABEL: @test_no_base_use_invoke(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[D:%.*]] = getelementptr i32, i32 addrspace(1)* [[B:%.*]], i64 16
-; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = invoke token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i32 addrspace(1)* [[B]], i32 addrspace(1)* [[D]]) ]
+; CHECK-NEXT: [[D:%.*]] = getelementptr i32, ptr addrspace(1) [[B:%.*]], i64 16
+; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = invoke token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) [[B]], ptr addrspace(1) [[D]]) ]
; CHECK-NEXT: to label [[NORMAL_DEST:%.*]] unwind label [[UNWIND_DEST:%.*]]
; CHECK: normal_dest:
-; CHECK-NEXT: [[D_NEW_1:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 1)
-; CHECK-NEXT: store i32 1, i32 addrspace(1)* [[D_NEW_1]], align 4
+; CHECK-NEXT: [[D_NEW_1:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 1)
+; CHECK-NEXT: store i32 1, ptr addrspace(1) [[D_NEW_1]], align 4
; CHECK-NEXT: ret void
; CHECK: unwind_dest:
; CHECK-NEXT: [[LPAD:%.*]] = landingpad token
; CHECK-NEXT: cleanup
-; CHECK-NEXT: [[LPD_NEW_1:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[LPAD]], i32 0, i32 1)
-; CHECK-NEXT: store i32 2, i32 addrspace(1)* [[LPD_NEW_1]], align 4
+; CHECK-NEXT: [[LPD_NEW_1:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[LPAD]], i32 0, i32 1)
+; CHECK-NEXT: store i32 2, ptr addrspace(1) [[LPD_NEW_1]], align 4
; CHECK-NEXT: ret void
;
entry:
- %d = getelementptr i32, i32 addrspace(1)* %b, i64 16
- %safepoint_token = invoke token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live" (i32 addrspace(1)* %b, i32 addrspace(1)* %b, i32 addrspace(1)* %d, i32 addrspace(1)* %d)]
+ %d = getelementptr i32, ptr addrspace(1) %b, i64 16
+ %safepoint_token = invoke token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live" (ptr addrspace(1) %b, ptr addrspace(1) %b, ptr addrspace(1) %d, ptr addrspace(1) %d)]
to label %normal_dest unwind label %unwind_dest
normal_dest:
- %b.new.1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 0)
- %b.new.2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 1)
- %d.new.1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 2)
- %d.new.2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 3)
- %d.new.3 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 1, i32 2)
- %d.new.4 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 1, i32 3)
- store i32 1, i32 addrspace(1)* %d.new.1
+ %b.new.1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0)
+ %b.new.2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 1)
+ %d.new.1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 2)
+ %d.new.2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 3)
+ %d.new.3 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 1, i32 2)
+ %d.new.4 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 1, i32 3)
+ store i32 1, ptr addrspace(1) %d.new.1
ret void
unwind_dest:
%lpad = landingpad token
cleanup
- %lpb.new.1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %lpad, i32 0, i32 0)
- %lpb.new.2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %lpad, i32 0, i32 1)
- %lpd.new.1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %lpad, i32 0, i32 2)
- %lpd.new.2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %lpad, i32 0, i32 3)
- %lpd.new.3 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %lpad, i32 1, i32 2)
- %lpd.new.4 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %lpad, i32 1, i32 3)
- store i32 2, i32 addrspace(1)* %lpd.new.1
+ %lpb.new.1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %lpad, i32 0, i32 0)
+ %lpb.new.2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %lpad, i32 0, i32 1)
+ %lpd.new.1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %lpad, i32 0, i32 2)
+ %lpd.new.2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %lpad, i32 0, i32 3)
+ %lpd.new.3 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %lpad, i32 1, i32 2)
+ %lpd.new.4 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %lpad, i32 1, i32 3)
+ store i32 2, ptr addrspace(1) %lpd.new.1
ret void
}
-declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64, i32, void ()*, i32, i32, ...)
-declare i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token, i32, i32)
+declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...)
+declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32, i32)
; CHECK: right:
; CHECK-NEXT: br label [[MERGE:%.*]]
; CHECK: left:
-; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"() ]
+; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"() ]
; CHECK-NEXT: br label [[MERGE]]
; CHECK: merge:
-; CHECK-NEXT: [[SAFEPOINT_TOKEN2:%.*]] = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"() ]
+; CHECK-NEXT: [[SAFEPOINT_TOKEN2:%.*]] = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"() ]
; CHECK-NEXT: ret i1 true
;
entry:
right:
  br label %merge
left:
- %safepoint_token = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live" (i32* null)]
- %pnew = call i32* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 0)
+ %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live" (ptr null)]
+ %pnew = call ptr @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0)
br label %merge
merge:
- %pnew_phi = phi i32* [null, %right], [%pnew, %left]
- %safepoint_token2 = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live" (i32* %pnew_phi)]
- %pnew2 = call i32* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token2, i32 0, i32 0)
- %cmp = icmp eq i32* %pnew2, null
+ %pnew_phi = phi ptr [null, %right], [%pnew, %left]
+ %safepoint_token2 = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live" (ptr %pnew_phi)]
+ %pnew2 = call ptr @llvm.experimental.gc.relocate.p1(token %safepoint_token2, i32 0, i32 0)
+ %cmp = icmp eq ptr %pnew2, null
ret i1 %cmp
}
-define i32* @test_undef(i1 %cond) gc "statepoint-example" {
+define ptr @test_undef(i1 %cond) gc "statepoint-example" {
; CHECK-LABEL: @test_undef(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[COND:%.*]], label [[LEFT:%.*]], label [[RIGHT:%.*]]
; CHECK: right:
; CHECK-NEXT: br label [[MERGE:%.*]]
; CHECK: left:
-; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"() ]
+; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"() ]
; CHECK-NEXT: br label [[MERGE]]
; CHECK: merge:
-; CHECK-NEXT: [[SAFEPOINT_TOKEN2:%.*]] = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"() ]
-; CHECK-NEXT: ret i32* undef
+; CHECK-NEXT: [[SAFEPOINT_TOKEN2:%.*]] = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"() ]
+; CHECK-NEXT: ret ptr undef
;
entry:
br i1 %cond, label %left, label %right
br label %merge
left:
- %safepoint_token = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live" (i32* undef)]
- %pnew = call i32* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 0)
+ %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live" (ptr undef)]
+ %pnew = call ptr @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0)
br label %merge
merge:
- %pnew_phi = phi i32* [undef, %right], [%pnew, %left]
- %safepoint_token2 = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live" (i32* %pnew_phi)]
- %pnew2 = call i32* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token2, i32 0, i32 0)
- ret i32* %pnew2
+ %pnew_phi = phi ptr [undef, %right], [%pnew, %left]
+ %safepoint_token2 = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live" (ptr %pnew_phi)]
+ %pnew2 = call ptr @llvm.experimental.gc.relocate.p1(token %safepoint_token2, i32 0, i32 0)
+ ret ptr %pnew2
}
-declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64, i32, void ()*, i32, i32, ...)
-declare i32* @llvm.experimental.gc.relocate.p1i32(token, i32, i32)
+declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...)
+declare ptr @llvm.experimental.gc.relocate.p1(token, i32, i32)
; pointers being relocated at a statepoint.
-declare i32* @fake_personality_function()
+declare ptr @fake_personality_function()
declare void @func()
-define i1 @test_negative(i32 addrspace(1)* %p) gc "statepoint-example" {
+define i1 @test_negative(ptr addrspace(1) %p) gc "statepoint-example" {
; CHECK-LABEL: @test_negative(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i32 addrspace(1)* [[P:%.*]]) ]
-; CHECK-NEXT: [[PNEW:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 addrspace(1)* [[PNEW]], null
+; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) [[P:%.*]]) ]
+; CHECK-NEXT: [[PNEW:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr addrspace(1) [[PNEW]], null
; CHECK-NEXT: ret i1 [[CMP]]
;
entry:
- %safepoint_token = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(i32 addrspace(1)* %p)]
- %pnew = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 0)
- %cmp = icmp eq i32 addrspace(1)* %pnew, null
+ %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) %p)]
+ %pnew = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0)
+ %cmp = icmp eq ptr addrspace(1) %pnew, null
ret i1 %cmp
}
-define i1 @test_nonnull(i32 addrspace(1)* nonnull %p) gc "statepoint-example" {
+define i1 @test_nonnull(ptr addrspace(1) nonnull %p) gc "statepoint-example" {
; CHECK-LABEL: @test_nonnull(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"() ]
+; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"() ]
; CHECK-NEXT: ret i1 false
;
entry:
- %safepoint_token = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(i32 addrspace(1)* %p)]
- %pnew = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 0)
- %cmp = icmp eq i32 addrspace(1)* %pnew, null
+ %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) %p)]
+ %pnew = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0)
+ %cmp = icmp eq ptr addrspace(1) %pnew, null
ret i1 %cmp
}
define i1 @test_null() gc "statepoint-example" {
; CHECK-LABEL: @test_null(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"() ]
+; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"() ]
; CHECK-NEXT: ret i1 true
;
entry:
- %safepoint_token = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(i32 addrspace(1)* null)]
- %pnew = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 0)
- %cmp = icmp eq i32 addrspace(1)* %pnew, null
+ %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) null)]
+ %pnew = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0)
+ %cmp = icmp eq ptr addrspace(1) %pnew, null
ret i1 %cmp
}
define i1 @test_undef() gc "statepoint-example" {
; CHECK-LABEL: @test_undef(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"() ]
+; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"() ]
; CHECK-NEXT: ret i1 undef
;
entry:
- %safepoint_token = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(i32 addrspace(1)* undef)]
- %pnew = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 0)
- %cmp = icmp eq i32 addrspace(1)* %pnew, null
+ %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) undef)]
+ %pnew = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0)
+ %cmp = icmp eq ptr addrspace(1) %pnew, null
ret i1 %cmp
}
-define i1 @test_negative_invoke(i32 addrspace(1)* %p) gc "statepoint-example" personality i32* ()* @fake_personality_function {
+define i1 @test_negative_invoke(ptr addrspace(1) %p) gc "statepoint-example" personality ptr @fake_personality_function {
; CHECK-LABEL: @test_negative_invoke(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = invoke token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i32 addrspace(1)* [[P:%.*]]) ]
+; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = invoke token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) [[P:%.*]]) ]
; CHECK-NEXT: to label [[NORMAL_DEST:%.*]] unwind label [[UNWIND_DEST:%.*]]
; CHECK: normal_dest:
-; CHECK-NEXT: [[PNEW:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 addrspace(1)* [[PNEW]], null
+; CHECK-NEXT: [[PNEW:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr addrspace(1) [[PNEW]], null
; CHECK-NEXT: ret i1 [[CMP]]
; CHECK: unwind_dest:
; CHECK-NEXT: [[LPAD:%.*]] = landingpad token
; CHECK-NEXT: cleanup
-; CHECK-NEXT: [[PNEW2:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[LPAD]], i32 0, i32 0)
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i32 addrspace(1)* [[PNEW2]], null
+; CHECK-NEXT: [[PNEW2:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[LPAD]], i32 0, i32 0)
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ne ptr addrspace(1) [[PNEW2]], null
; CHECK-NEXT: ret i1 [[CMP2]]
;
entry:
- %safepoint_token = invoke token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(i32 addrspace(1)* %p)]
+ %safepoint_token = invoke token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) %p)]
to label %normal_dest unwind label %unwind_dest
normal_dest:
- %pnew = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 0)
- %cmp = icmp eq i32 addrspace(1)* %pnew, null
+ %pnew = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0)
+ %cmp = icmp eq ptr addrspace(1) %pnew, null
ret i1 %cmp
unwind_dest:
%lpad = landingpad token
cleanup
- %pnew2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %lpad, i32 0, i32 0)
- %cmp2 = icmp ne i32 addrspace(1)* %pnew2, null
+ %pnew2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %lpad, i32 0, i32 0)
+ %cmp2 = icmp ne ptr addrspace(1) %pnew2, null
ret i1 %cmp2
}
-define i1 @test_nonnull_invoke(i32 addrspace(1)* nonnull %p) gc "statepoint-example" personality i32* ()* @fake_personality_function {
+define i1 @test_nonnull_invoke(ptr addrspace(1) nonnull %p) gc "statepoint-example" personality ptr @fake_personality_function {
; CHECK-LABEL: @test_nonnull_invoke(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = invoke token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"() ]
+; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = invoke token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"() ]
; CHECK-NEXT: to label [[NORMAL_DEST:%.*]] unwind label [[UNWIND_DEST:%.*]]
; CHECK: normal_dest:
; CHECK-NEXT: ret i1 false
; CHECK-NEXT: ret i1 true
;
entry:
- %safepoint_token = invoke token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(i32 addrspace(1)* %p)]
+ %safepoint_token = invoke token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) %p)]
to label %normal_dest unwind label %unwind_dest
normal_dest:
- %pnew = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 0)
- %cmp = icmp eq i32 addrspace(1)* %pnew, null
+ %pnew = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0)
+ %cmp = icmp eq ptr addrspace(1) %pnew, null
ret i1 %cmp
unwind_dest:
%lpad = landingpad token
cleanup
- %pnew2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %lpad, i32 0, i32 0)
- %cmp2 = icmp ne i32 addrspace(1)* %pnew2, null
+ %pnew2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %lpad, i32 0, i32 0)
+ %cmp2 = icmp ne ptr addrspace(1) %pnew2, null
ret i1 %cmp2
}
-define i1 @test_null_invoke() gc "statepoint-example" personality i32* ()* @fake_personality_function {
+define i1 @test_null_invoke() gc "statepoint-example" personality ptr @fake_personality_function {
; CHECK-LABEL: @test_null_invoke(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = invoke token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"() ]
+; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = invoke token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"() ]
; CHECK-NEXT: to label [[NORMAL_DEST:%.*]] unwind label [[UNWIND_DEST:%.*]]
; CHECK: normal_dest:
; CHECK-NEXT: ret i1 true
; CHECK-NEXT: ret i1 false
;
entry:
- %safepoint_token = invoke token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(i32 addrspace(1)* null)]
+ %safepoint_token = invoke token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) null)]
to label %normal_dest unwind label %unwind_dest
normal_dest:
- %pnew = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 0)
- %cmp = icmp eq i32 addrspace(1)* %pnew, null
+ %pnew = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0)
+ %cmp = icmp eq ptr addrspace(1) %pnew, null
ret i1 %cmp
unwind_dest:
%lpad = landingpad token
cleanup
- %pnew2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %lpad, i32 0, i32 0)
- %cmp2 = icmp ne i32 addrspace(1)* %pnew2, null
+ %pnew2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %lpad, i32 0, i32 0)
+ %cmp2 = icmp ne ptr addrspace(1) %pnew2, null
ret i1 %cmp2
}
-define i1 @test_undef_invoke() gc "statepoint-example" personality i32* ()* @fake_personality_function {
+define i1 @test_undef_invoke() gc "statepoint-example" personality ptr @fake_personality_function {
; CHECK-LABEL: @test_undef_invoke(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = invoke token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"() ]
+; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = invoke token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr nonnull elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"() ]
; CHECK-NEXT: to label [[NORMAL_DEST:%.*]] unwind label [[UNWIND_DEST:%.*]]
; CHECK: normal_dest:
; CHECK-NEXT: ret i1 undef
; CHECK-NEXT: ret i1 undef
;
entry:
- %safepoint_token = invoke token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(i32 addrspace(1)* undef)]
+ %safepoint_token = invoke token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) undef)]
to label %normal_dest unwind label %unwind_dest
normal_dest:
- %pnew = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 0)
- %cmp = icmp eq i32 addrspace(1)* %pnew, null
+ %pnew = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0)
+ %cmp = icmp eq ptr addrspace(1) %pnew, null
ret i1 %cmp
unwind_dest:
%lpad = landingpad token
cleanup
- %pnew2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %lpad, i32 0, i32 0)
- %cmp2 = icmp ne i32 addrspace(1)* %pnew2, null
+ %pnew2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %lpad, i32 0, i32 0)
+ %cmp2 = icmp ne ptr addrspace(1) %pnew2, null
ret i1 %cmp2
}
-declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64, i32, void ()*, i32, i32, ...)
-declare i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token, i32, i32) #3
+declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...)
+declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32, i32) #3
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -opaque-pointers -S | FileCheck %s
-declare i32 @fwrite(i8*, i64, i64, ptr)
+declare i32 @fwrite(ptr, i64, i64, ptr)
declare i8 @fputc(ptr, ptr)
declare void @printf(ptr)
; trigger an abort).
define void @call_fwrite(ptr %fp) {
- %p = getelementptr [1 x i8], ptr @ca1, i32 0, i32 0
- call i32 @fwrite(ptr %p, i64 1, i64 1, ptr %fp)
+ call i32 @fwrite(ptr @ca1, i64 1, i64 1, ptr %fp)
ret void
}
define void @call_printf(ptr %s) {
; CHECK-LABEL: @call_printf(
;
- %fmt = getelementptr [3 x i8], ptr @pcnt_s, i32 0, i32 0
- call i32 @printf(ptr %fmt)
+ call i32 @printf(ptr @pcnt_s)
ret void
}
define i8 @call_fprintf(ptr %fp, ptr %p) {
; CHECK-LABEL: @call_fprintf(
;
- %fmt = getelementptr [3 x i8], ptr @pcnt_s, i32 0, i32 0
- %call = call i8 (ptr, ptr, ...) @fprintf(ptr %fp, ptr %fmt, ptr %p)
+ %call = call i8 (ptr, ptr, ...) @fprintf(ptr %fp, ptr @pcnt_s, ptr %p)
ret i8 %call
}
define i8 @call_sprintf(ptr %p, ptr %q) {
; CHECK-LABEL: @call_sprintf(
;
- %fmt = getelementptr [3 x i8], ptr @pcnt_s, i32 0, i32 0
- %call = call i8 (ptr, ptr, ...) @sprintf(ptr %p, ptr %fmt, ptr %q)
+ %call = call i8 (ptr, ptr, ...) @sprintf(ptr %p, ptr @pcnt_s, ptr %q)
ret i8 %call
}
%array = alloca i32, i32 2
; CHECK-NOT: %array
- %length_gep = getelementptr inbounds i32, i32 * %array, i32 0
- %value_gep = getelementptr inbounds i32, i32 * %array, i32 1
- store i32 %length, i32 * %length_gep
- store i32 0, i32 * %value_gep
- %loaded_length = load i32, i32 * %length_gep
+ %value_gep = getelementptr inbounds i32, ptr %array, i32 1
+ store i32 %length, ptr %array
+ store i32 0, ptr %value_gep
+ %loaded_length = load i32, ptr %array
; CHECK-NOT: %loaded_length = load i32
ret i32 %loaded_length
; FIXME: This is technically incorrect because it might overwrite a poison
; value. Stop folding it once #52930 is resolved.
-define void @store_of_undef(i32* %P) {
+define void @store_of_undef(ptr %P) {
; CHECK-LABEL: @store_of_undef(
; CHECK-NEXT: ret void
;
- store i32 undef, i32* %P
+ store i32 undef, ptr %P
ret void
}
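; A minimal sketch (hypothetical, not part of the checked IR) of the
; refinement concern in the FIXME above: if the location already held
; poison, the store of undef would have weakened it to undef, so dropping
; the store lets a later load observe poison instead:
;   store i32 poison, ptr %P
;   store i32 undef, ptr %P   ; removed by the fold
;   %v = load i32, ptr %P     ; poison after the fold, undef before it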
-define void @store_of_poison(i32* %P) {
+define void @store_of_poison(ptr %P) {
; CHECK-LABEL: @store_of_poison(
; CHECK-NEXT: ret void
;
- store i32 poison, i32* %P
+ store i32 poison, ptr %P
ret void
}
-define void @store_into_undef(i32* %P) {
+define void @store_into_undef(ptr %P) {
; CHECK-LABEL: @store_into_undef(
-; CHECK-NEXT: store i32 123, i32* undef, align 4
+; CHECK-NEXT: store i32 123, ptr undef, align 4
; CHECK-NEXT: ret void
;
- store i32 123, i32* undef
+ store i32 123, ptr undef
ret void
}
-define void @store_into_null(i32* %P) {
+define void @store_into_null(ptr %P) {
; CHECK-LABEL: @store_into_null(
-; CHECK-NEXT: store i32 poison, i32* null, align 4294967296
+; CHECK-NEXT: store i32 poison, ptr null, align 4294967296
; CHECK-NEXT: ret void
;
- store i32 124, i32* null
+ store i32 124, ptr null
ret void
}
-define void @test2(i32* %P) {
+define void @test2(ptr %P) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: ret void
;
- %X = load i32, i32* %P
+ %X = load i32, ptr %P
%Y = add i32 %X, 0
- store i32 %Y, i32* %P
+ store i32 %Y, ptr %P
ret void
}
define void @store_at_gep_off_null_inbounds(i64 %offset) {
; CHECK-LABEL: @store_at_gep_off_null_inbounds(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds i32, i32* null, i64 [[OFFSET:%.*]]
-; CHECK-NEXT: store i32 poison, i32* [[PTR]], align 4
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds i32, ptr null, i64 [[OFFSET:%.*]]
+; CHECK-NEXT: store i32 poison, ptr [[PTR]], align 4
; CHECK-NEXT: ret void
;
- %ptr = getelementptr inbounds i32, i32 *null, i64 %offset
- store i32 24, i32* %ptr
+ %ptr = getelementptr inbounds i32, ptr null, i64 %offset
+ store i32 24, ptr %ptr
ret void
}
define void @store_at_gep_off_null_not_inbounds(i64 %offset) {
; CHECK-LABEL: @store_at_gep_off_null_not_inbounds(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr i32, i32* null, i64 [[OFFSET:%.*]]
-; CHECK-NEXT: store i32 poison, i32* [[PTR]], align 4
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr i32, ptr null, i64 [[OFFSET:%.*]]
+; CHECK-NEXT: store i32 poison, ptr [[PTR]], align 4
; CHECK-NEXT: ret void
;
- %ptr = getelementptr i32, i32 *null, i64 %offset
- store i32 24, i32* %ptr
+ %ptr = getelementptr i32, ptr null, i64 %offset
+ store i32 24, ptr %ptr
ret void
}
define void @store_at_gep_off_no_null_opt(i64 %offset) #0 {
; CHECK-LABEL: @store_at_gep_off_no_null_opt(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds i32, i32* null, i64 [[OFFSET:%.*]]
-; CHECK-NEXT: store i32 24, i32* [[PTR]], align 4
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds i32, ptr null, i64 [[OFFSET:%.*]]
+; CHECK-NEXT: store i32 24, ptr [[PTR]], align 4
; CHECK-NEXT: ret void
;
- %ptr = getelementptr inbounds i32, i32 *null, i64 %offset
- store i32 24, i32* %ptr
+ %ptr = getelementptr inbounds i32, ptr null, i64 %offset
+ store i32 24, ptr %ptr
ret void
}
br i1 %C, label %Cond, label %Cond2
Cond:
- store i32 -987654321, i32* %A
+ store i32 -987654321, ptr %A
br label %Cont
Cond2:
- store i32 47, i32* %A
+ store i32 47, ptr %A
br label %Cont
Cont:
- %V = load i32, i32* %A
+ %V = load i32, ptr %A
ret i32 %V
}
; CHECK-NEXT: ret i32 [[STOREMERGE]]
;
%A = alloca i32
- store i32 47, i32* %A
+ store i32 47, ptr %A
br i1 %C, label %Cond, label %Cont
Cond:
- store i32 -987654321, i32* %A
+ store i32 -987654321, ptr %A
br label %Cont
Cont:
- %V = load i32, i32* %A
+ %V = load i32, ptr %A
ret i32 %V
}
; "if then"
-define void @test5(i1 %C, i32* %P) {
+define void @test5(i1 %C, ptr %P) {
; CHECK-LABEL: @test5(
; CHECK-NEXT: br i1 [[C:%.*]], label [[COND:%.*]], label [[CONT:%.*]]
; CHECK: Cond:
; CHECK-NEXT: br label [[CONT]]
; CHECK: Cont:
; CHECK-NEXT: [[STOREMERGE:%.*]] = phi i32 [ -987654321, [[COND]] ], [ 47, [[TMP0:%.*]] ]
-; CHECK-NEXT: store i32 [[STOREMERGE]], i32* [[P:%.*]], align 1
+; CHECK-NEXT: store i32 [[STOREMERGE]], ptr [[P:%.*]], align 1
; CHECK-NEXT: ret void
;
- store i32 47, i32* %P, align 1
+ store i32 47, ptr %P, align 1
br i1 %C, label %Cond, label %Cont
Cond:
- store i32 -987654321, i32* %P, align 1
+ store i32 -987654321, ptr %P, align 1
br label %Cont
Cont:
; PR14753 - merging two stores should preserve the TBAA tag.
-define void @test6(i32 %n, float* %a, i32* %gi) nounwind uwtable ssp {
+define void @test6(i32 %n, ptr %a, ptr %gi) nounwind uwtable ssp {
; CHECK-LABEL: @test6(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK: for.cond:
; CHECK-NEXT: [[STOREMERGE:%.*]] = phi i32 [ 42, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
-; CHECK-NEXT: store i32 [[STOREMERGE]], i32* [[GI:%.*]], align 4, !tbaa [[TBAA0:![0-9]+]]
+; CHECK-NEXT: store i32 [[STOREMERGE]], ptr [[GI:%.*]], align 4, !tbaa [[TBAA0:![0-9]+]]
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[STOREMERGE]], [[N:%.*]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[STOREMERGE]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[IDXPROM]]
-; CHECK-NEXT: store float 0.000000e+00, float* [[ARRAYIDX]], align 4, !tbaa [[TBAA4:![0-9]+]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[GI]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IDXPROM]]
+; CHECK-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA4:![0-9]+]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[GI]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[INC]] = add nsw i32 [[TMP0]], 1
; CHECK-NEXT: br label [[FOR_COND]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
- store i32 42, i32* %gi, align 4, !tbaa !0
+ store i32 42, ptr %gi, align 4, !tbaa !0
br label %for.cond
for.cond:
%storemerge = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %0 = load i32, i32* %gi, align 4, !tbaa !0
+ %0 = load i32, ptr %gi, align 4, !tbaa !0
%cmp = icmp slt i32 %0, %n
br i1 %cmp, label %for.body, label %for.end
for.body:
%idxprom = sext i32 %0 to i64
- %arrayidx = getelementptr inbounds float, float* %a, i64 %idxprom
- store float 0.000000e+00, float* %arrayidx, align 4, !tbaa !3
- %1 = load i32, i32* %gi, align 4, !tbaa !0
+ %arrayidx = getelementptr inbounds float, ptr %a, i64 %idxprom
+ store float 0.000000e+00, ptr %arrayidx, align 4, !tbaa !3
+ %1 = load i32, ptr %gi, align 4, !tbaa !0
%inc = add nsw i32 %1, 1
- store i32 %inc, i32* %gi, align 4, !tbaa !0
+ store i32 %inc, ptr %gi, align 4, !tbaa !0
br label %for.cond
for.end:
ret void
}
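; Illustrative sketch (restating what the [[TBAA0]] check above verifies):
; because both original stores to %gi carry the same !tbaa !0 tag, the
; merged phi-fed store keeps that common tag:
;   %storemerge = phi i32 [ 42, %entry ], [ %inc, %for.body ]
;   store i32 %storemerge, ptr %gi, align 4, !tbaa !0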
-define void @dse1(i32* %p) {
+define void @dse1(ptr %p) {
; CHECK-LABEL: @dse1(
-; CHECK-NEXT: store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 0, ptr [[P:%.*]], align 4
; CHECK-NEXT: ret void
;
- store i32 0, i32* %p
- store i32 0, i32* %p
+ store i32 0, ptr %p
+ store i32 0, ptr %p
ret void
}
; same location, then the contents of the location are undefined if there's
; an actual race. As such, we're free to pick either store under the
; assumption that we're not racing with any other thread.
-define void @dse2(i32* %p) {
+define void @dse2(ptr %p) {
; CHECK-LABEL: @dse2(
-; CHECK-NEXT: store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 0, ptr [[P:%.*]], align 4
; CHECK-NEXT: ret void
;
- store atomic i32 0, i32* %p unordered, align 4
- store i32 0, i32* %p
+ store atomic i32 0, ptr %p unordered, align 4
+ store i32 0, ptr %p
ret void
}
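; Illustrative racing observer (hypothetical, not part of this test) for
; @dse2: with no ordering between the two stores, this load could already
; see either value, so collapsing the pair into one plain store removes no
; outcome a racing reader could rely on:
;   %seen = load atomic i32, ptr %p unordered, align 4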
-define void @dse3(i32* %p) {
+define void @dse3(ptr %p) {
; CHECK-LABEL: @dse3(
-; CHECK-NEXT: store atomic i32 0, i32* [[P:%.*]] unordered, align 4
+; CHECK-NEXT: store atomic i32 0, ptr [[P:%.*]] unordered, align 4
; CHECK-NEXT: ret void
;
- store i32 0, i32* %p
- store atomic i32 0, i32* %p unordered, align 4
+ store i32 0, ptr %p
+ store atomic i32 0, ptr %p unordered, align 4
ret void
}
-define void @dse4(i32* %p) {
+define void @dse4(ptr %p) {
; CHECK-LABEL: @dse4(
-; CHECK-NEXT: store atomic i32 0, i32* [[P:%.*]] unordered, align 4
+; CHECK-NEXT: store atomic i32 0, ptr [[P:%.*]] unordered, align 4
; CHECK-NEXT: ret void
;
- store atomic i32 0, i32* %p unordered, align 4
- store atomic i32 0, i32* %p unordered, align 4
+ store atomic i32 0, ptr %p unordered, align 4
+ store atomic i32 0, ptr %p unordered, align 4
ret void
}
; Implementation limit - we could remove the unordered store here, but
; we currently don't.
-define void @dse5(i32* %p) {
+define void @dse5(ptr %p) {
; CHECK-LABEL: @dse5(
-; CHECK-NEXT: store atomic i32 0, i32* [[P:%.*]] unordered, align 4
-; CHECK-NEXT: store atomic i32 0, i32* [[P]] seq_cst, align 4
+; CHECK-NEXT: store atomic i32 0, ptr [[P:%.*]] unordered, align 4
+; CHECK-NEXT: store atomic i32 0, ptr [[P]] seq_cst, align 4
; CHECK-NEXT: ret void
;
- store atomic i32 0, i32* %p unordered, align 4
- store atomic i32 0, i32* %p seq_cst, align 4
+ store atomic i32 0, ptr %p unordered, align 4
+ store atomic i32 0, ptr %p seq_cst, align 4
ret void
}
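; If the implementation limit noted above were lifted, a sketch of the
; expected result (hypothetical) would keep only the seq_cst store, which
; both overwrites the value and preserves the ordering effect:
;   store atomic i32 0, ptr %p seq_cst, align 4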
-define void @write_back1(i32* %p) {
+define void @write_back1(ptr %p) {
; CHECK-LABEL: @write_back1(
; CHECK-NEXT: ret void
;
- %v = load i32, i32* %p
- store i32 %v, i32* %p
+ %v = load i32, ptr %p
+ store i32 %v, ptr %p
ret void
}
-define void @write_back2(i32* %p) {
+define void @write_back2(ptr %p) {
; CHECK-LABEL: @write_back2(
; CHECK-NEXT: ret void
;
- %v = load atomic i32, i32* %p unordered, align 4
- store i32 %v, i32* %p
+ %v = load atomic i32, ptr %p unordered, align 4
+ store i32 %v, ptr %p
ret void
}
-define void @write_back3(i32* %p) {
+define void @write_back3(ptr %p) {
; CHECK-LABEL: @write_back3(
; CHECK-NEXT: ret void
;
- %v = load i32, i32* %p
- store atomic i32 %v, i32* %p unordered, align 4
+ %v = load i32, ptr %p
+ store atomic i32 %v, ptr %p unordered, align 4
ret void
}
-define void @write_back4(i32* %p) {
+define void @write_back4(ptr %p) {
; CHECK-LABEL: @write_back4(
; CHECK-NEXT: ret void
;
- %v = load atomic i32, i32* %p unordered, align 4
- store atomic i32 %v, i32* %p unordered, align 4
+ %v = load atomic i32, ptr %p unordered, align 4
+ store atomic i32 %v, ptr %p unordered, align 4
ret void
}
; Can't remove store due to ordering side effect
-define void @write_back5(i32* %p) {
+define void @write_back5(ptr %p) {
; CHECK-LABEL: @write_back5(
-; CHECK-NEXT: [[V:%.*]] = load atomic i32, i32* [[P:%.*]] unordered, align 4
-; CHECK-NEXT: store atomic i32 [[V]], i32* [[P]] seq_cst, align 4
+; CHECK-NEXT: [[V:%.*]] = load atomic i32, ptr [[P:%.*]] unordered, align 4
+; CHECK-NEXT: store atomic i32 [[V]], ptr [[P]] seq_cst, align 4
; CHECK-NEXT: ret void
;
- %v = load atomic i32, i32* %p unordered, align 4
- store atomic i32 %v, i32* %p seq_cst, align 4
+ %v = load atomic i32, ptr %p unordered, align 4
+ store atomic i32 %v, ptr %p seq_cst, align 4
ret void
}
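; Illustrative (hypothetical) consumer of the ordering side effect noted
; above: another thread's seq_cst load may synchronize with the seq_cst
; store in @write_back5, so the write-back must stay even though the
; stored value is unchanged:
;   %observed = load atomic i32, ptr %p seq_cst, align 4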
-define void @write_back6(i32* %p) {
+define void @write_back6(ptr %p) {
; CHECK-LABEL: @write_back6(
-; CHECK-NEXT: [[V:%.*]] = load atomic i32, i32* [[P:%.*]] seq_cst, align 4
+; CHECK-NEXT: [[V:%.*]] = load atomic i32, ptr [[P:%.*]] seq_cst, align 4
; CHECK-NEXT: ret void
;
- %v = load atomic i32, i32* %p seq_cst, align 4
- store atomic i32 %v, i32* %p unordered, align 4
+ %v = load atomic i32, ptr %p seq_cst, align 4
+ store atomic i32 %v, ptr %p unordered, align 4
ret void
}
-define void @write_back7(i32* %p) {
+define void @write_back7(ptr %p) {
; CHECK-LABEL: @write_back7(
-; CHECK-NEXT: [[V:%.*]] = load atomic volatile i32, i32* [[P:%.*]] seq_cst, align 4
+; CHECK-NEXT: [[V:%.*]] = load atomic volatile i32, ptr [[P:%.*]] seq_cst, align 4
; CHECK-NEXT: ret void
;
- %v = load atomic volatile i32, i32* %p seq_cst, align 4
- store atomic i32 %v, i32* %p unordered, align 4
+ %v = load atomic volatile i32, ptr %p seq_cst, align 4
+ store atomic i32 %v, ptr %p unordered, align 4
ret void
}
; CHECK-LABEL: @store_to_constant(
; CHECK-NEXT: ret void
;
- store i32 0, i32* @Unknown
+ store i32 0, ptr @Unknown
ret void
}
br i1 %c1, label %lhs, label %rhs
lhs:
- store i32 1, i32* %baz
+ store i32 1, ptr %baz
br label %cleanup
rhs:
- store i32 2, i32* %baz
+ store i32 2, ptr %baz
br label %cleanup
cleanup:
; CHECK: %storemerge = phi i32 [ 2, %rhs ], [ 1, %lhs ], !dbg [[merge_loc:![0-9]+]]
- %baz.val = load i32, i32* %baz
+ %baz.val = load i32, ptr %baz
%ret.val = call i32 @escape(i32 %baz.val)
ret i32 %ret.val
}
@hello = constant [6 x i8] c"hello\00"
@a = common global [32 x i8] zeroinitializer, align 1
-declare i16 @stpcpy(i8*, i8*)
+declare i16 @stpcpy(ptr, ptr)
define void @test_no_simplify1() {
; CHECK-LABEL: @test_no_simplify1(
- %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
- call i16 @stpcpy(i8* %dst, i8* %src)
+ call i16 @stpcpy(ptr @a, ptr @hello)
; CHECK: call i16 @stpcpy
ret void
}
define void @test_no_simplify() {
; CHECK-LABEL: @test_no_simplify(
- %dst = getelementptr inbounds [60 x i16], [60 x i16]* @a, i32 0, i32 0
- %src = getelementptr inbounds [8 x i8], [8 x i8]* @.str, i32 0, i32 0
; CHECK-NEXT: call i16 @__strcpy_chk
- call i16 @__strcpy_chk(i16* %dst, i8* %src, i32 8)
+ call i16 @__strcpy_chk(ptr @a, ptr @.str, i32 8)
ret void
}
-declare i16 @__strcpy_chk(i16*, i8*, i32)
+declare i16 @__strcpy_chk(ptr, ptr, i32)
; of constant structs as arguments are folded to constants as expected.
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-declare i32 @atoi(i8*)
-declare i64 @atol(i8*)
-declare i64 @atoll(i8*)
+declare i32 @atoi(ptr)
+declare i64 @atol(ptr)
+declare i64 @atoll(ptr)
-declare i64 @strtol(i8*, i8**, i32)
-declare i64 @strtoll(i8*, i8**, i32)
+declare i64 @strtol(ptr, ptr, i32)
+declare i64 @strtoll(ptr, ptr, i32)
%struct.A = type { [4 x i8], [5 x i8], [7 x i8] }
; Fold atoi(a[I].M) for constant I in [0, 1] and member M in [a, b]
; to a constant.
-define void @fold_atoi_member(i32* %pi) {
+define void @fold_atoi_member(ptr %pi) {
; CHECK-LABEL: @fold_atoi_member(
-; CHECK-NEXT: store i32 1, i32* [[PI:%.*]], align 4
-; CHECK-NEXT: [[PIA0B:%.*]] = getelementptr i32, i32* [[PI]], i64 1
-; CHECK-NEXT: store i32 12, i32* [[PIA0B]], align 4
-; CHECK-NEXT: [[PIA1A:%.*]] = getelementptr i32, i32* [[PI]], i64 2
-; CHECK-NEXT: store i32 123, i32* [[PIA1A]], align 4
-; CHECK-NEXT: [[PIA1B:%.*]] = getelementptr i32, i32* [[PI]], i64 3
-; CHECK-NEXT: store i32 1234, i32* [[PIA1B]], align 4
+; CHECK-NEXT: store i32 1, ptr [[PI:%.*]], align 4
+; CHECK-NEXT: [[PIA0B:%.*]] = getelementptr i32, ptr [[PI]], i64 1
+; CHECK-NEXT: store i32 12, ptr [[PIA0B]], align 4
+; CHECK-NEXT: [[PIA1A:%.*]] = getelementptr i32, ptr [[PI]], i64 2
+; CHECK-NEXT: store i32 123, ptr [[PIA1A]], align 4
+; CHECK-NEXT: [[PIA1B:%.*]] = getelementptr i32, ptr [[PI]], i64 3
+; CHECK-NEXT: store i32 1234, ptr [[PIA1B]], align 4
; CHECK-NEXT: ret void
;
; Fold atoi(a[0].a) to 1.
- %pa0a = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 0
- %ia0a = call i32 @atoi(i8* %pa0a)
- %pia0a = getelementptr i32, i32* %pi, i32 0
- store i32 %ia0a, i32* %pia0a
+ %ia0a = call i32 @atoi(ptr @a)
+ store i32 %ia0a, ptr %pi
; Fold atoi(a[0].b) to 12.
- %pa0b = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 0
- %ia0b = call i32 @atoi(i8* %pa0b)
- %pia0b = getelementptr i32, i32* %pi, i32 1
- store i32 %ia0b, i32* %pia0b
+ %pa0b = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 1, i64 0
+ %ia0b = call i32 @atoi(ptr %pa0b)
+ %pia0b = getelementptr i32, ptr %pi, i32 1
+ store i32 %ia0b, ptr %pia0b
; Fold atoi(a[1].a) to 123.
- %pa1a = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 0
- %ia1a = call i32 @atoi(i8* %pa1a)
- %pia1a = getelementptr i32, i32* %pi, i32 2
- store i32 %ia1a, i32* %pia1a
+ %pa1a = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 0, i64 0
+ %ia1a = call i32 @atoi(ptr %pa1a)
+ %pia1a = getelementptr i32, ptr %pi, i32 2
+ store i32 %ia1a, ptr %pia1a
; Fold atoi(a[1].b) to 1234.
- %pa1b = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 0
- %ia1b = call i32 @atoi(i8* %pa1b)
- %pia1b = getelementptr i32, i32* %pi, i32 3
- store i32 %ia1b, i32* %pia1b
+ %pa1b = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 1, i64 0
+ %ia1b = call i32 @atoi(ptr %pa1b)
+ %pia1b = getelementptr i32, ptr %pi, i32 3
+ store i32 %ia1b, ptr %pia1b
ret void
}
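; Offset sketch for the geps above (derived from %struct.A alone; the
; initializer of @a is elided here): the three i8 arrays sit at byte
; offsets 0, 4, and 9 of each 16-byte element, so a[1].b begins at byte
; 20 of the flattened constant, which is what this gep computes:
;   getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 1, i64 0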
; the undefined calls the folder would have to differentiate between
; the empty string and an out-of-bounds pointer.
-define void @fold_atoi_offset_out_of_bounds(i32* %pi) {
+define void @fold_atoi_offset_out_of_bounds(ptr %pi) {
; CHECK-LABEL: @fold_atoi_offset_out_of_bounds(
; TODO: Check folding.
;
; Fold atoi((const char*)a + sizeof a) to zero.
- %ia_0_0_32 = call i32 @atoi(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 1, i64 0, i32 0, i64 0))
- %pia_0_0_32 = getelementptr i32, i32* %pi, i32 0
- store i32 %ia_0_0_32, i32* %pia_0_0_32
+ %ia_0_0_32 = call i32 @atoi(ptr getelementptr inbounds ([2 x %struct.A], ptr @a, i64 1, i64 0, i32 0, i64 0))
+ store i32 %ia_0_0_32, ptr %pi
; Likewise, fold atoi((const char*)a + sizeof a + 1) to zero.
- %pa_0_0_33 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 33
- %ia_0_0_33 = call i32 @atoi(i8* %pa_0_0_33)
- %pia_0_0_33 = getelementptr i32, i32* %pi, i32 0
- store i32 %ia_0_0_33, i32* %pia_0_0_33
+ %pa_0_0_33 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 0, i64 33
+ %ia_0_0_33 = call i32 @atoi(ptr %pa_0_0_33)
+ store i32 %ia_0_0_33, ptr %pi
ret void
}
; Fold atol(a[I].M) for constant I in [0, 1] and member M in [a, b, c]
; to a constant.
-define void @fold_atol_member(i64* %pi) {
+define void @fold_atol_member(ptr %pi) {
; CHECK-LABEL: @fold_atol_member(
-; CHECK-NEXT: store i64 1, i64* [[PI:%.*]], align 4
-; CHECK-NEXT: [[PIA0B:%.*]] = getelementptr i64, i64* [[PI]], i64 1
-; CHECK-NEXT: store i64 12, i64* [[PIA0B]], align 4
-; CHECK-NEXT: [[PIA0C:%.*]] = getelementptr i64, i64* [[PI]], i64 2
-; CHECK-NEXT: store i64 56789, i64* [[PIA0C]], align 4
-; CHECK-NEXT: [[PIA1A:%.*]] = getelementptr i64, i64* [[PI]], i64 3
-; CHECK-NEXT: store i64 123, i64* [[PIA1A]], align 4
-; CHECK-NEXT: [[PIA1B:%.*]] = getelementptr i64, i64* [[PI]], i64 4
-; CHECK-NEXT: store i64 1234, i64* [[PIA1B]], align 4
-; CHECK-NEXT: [[PIA1C:%.*]] = getelementptr i64, i64* [[PI]], i64 5
-; CHECK-NEXT: store i64 67890, i64* [[PIA1C]], align 4
+; CHECK-NEXT: store i64 1, ptr [[PI:%.*]], align 4
+; CHECK-NEXT: [[PIA0B:%.*]] = getelementptr i64, ptr [[PI]], i64 1
+; CHECK-NEXT: store i64 12, ptr [[PIA0B]], align 4
+; CHECK-NEXT: [[PIA0C:%.*]] = getelementptr i64, ptr [[PI]], i64 2
+; CHECK-NEXT: store i64 56789, ptr [[PIA0C]], align 4
+; CHECK-NEXT: [[PIA1A:%.*]] = getelementptr i64, ptr [[PI]], i64 3
+; CHECK-NEXT: store i64 123, ptr [[PIA1A]], align 4
+; CHECK-NEXT: [[PIA1B:%.*]] = getelementptr i64, ptr [[PI]], i64 4
+; CHECK-NEXT: store i64 1234, ptr [[PIA1B]], align 4
+; CHECK-NEXT: [[PIA1C:%.*]] = getelementptr i64, ptr [[PI]], i64 5
+; CHECK-NEXT: store i64 67890, ptr [[PIA1C]], align 4
; CHECK-NEXT: ret void
;
; Fold atol(a[0].a) to 1.
- %pa0a = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 0
- %ia0a = call i64 @atol(i8* %pa0a)
- %pia0a = getelementptr i64, i64* %pi, i32 0
- store i64 %ia0a, i64* %pia0a
+ %ia0a = call i64 @atol(ptr @a)
+ store i64 %ia0a, ptr %pi
; Fold atol(a[0].b) to 12.
- %pa0b = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 0
- %ia0b = call i64 @atol(i8* %pa0b)
- %pia0b = getelementptr i64, i64* %pi, i32 1
- store i64 %ia0b, i64* %pia0b
+ %pa0b = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 1, i64 0
+ %ia0b = call i64 @atol(ptr %pa0b)
+ %pia0b = getelementptr i64, ptr %pi, i32 1
+ store i64 %ia0b, ptr %pia0b
; Fold atol(a[0].c) to 56789.
- %pa0c = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 2, i64 0
- %ia0c = call i64 @atol(i8* %pa0c)
- %pia0c = getelementptr i64, i64* %pi, i32 2
- store i64 %ia0c, i64* %pia0c
+ %pa0c = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 2, i64 0
+ %ia0c = call i64 @atol(ptr %pa0c)
+ %pia0c = getelementptr i64, ptr %pi, i32 2
+ store i64 %ia0c, ptr %pia0c
; Fold atol(a[1].a) to 123.
- %pa1a = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 0
- %ia1a = call i64 @atol(i8* %pa1a)
- %pia1a = getelementptr i64, i64* %pi, i32 3
- store i64 %ia1a, i64* %pia1a
+ %pa1a = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 0, i64 0
+ %ia1a = call i64 @atol(ptr %pa1a)
+ %pia1a = getelementptr i64, ptr %pi, i32 3
+ store i64 %ia1a, ptr %pia1a
; Fold atol(a[1].b) to 1234.
- %pa1b = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 0
- %ia1b = call i64 @atol(i8* %pa1b)
- %pia1b = getelementptr i64, i64* %pi, i32 4
- store i64 %ia1b, i64* %pia1b
+ %pa1b = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 1, i64 0
+ %ia1b = call i64 @atol(ptr %pa1b)
+ %pia1b = getelementptr i64, ptr %pi, i32 4
+ store i64 %ia1b, ptr %pia1b
; Fold atol(a[1].c) to 67890.
- %pa1c = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 2, i64 0
- %ia1c = call i64 @atol(i8* %pa1c)
- %pia1c = getelementptr i64, i64* %pi, i32 5
- store i64 %ia1c, i64* %pia1c
+ %pa1c = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 2, i64 0
+ %ia1c = call i64 @atol(ptr %pa1c)
+ %pia1c = getelementptr i64, ptr %pi, i32 5
+ store i64 %ia1c, ptr %pia1c
ret void
}
; Fold atoll(a[I].M + C) for constant I in [0, 1], member M in [a, b, c],
; and C in a valid range to a constant.
-define void @fold_atoll_member_pC(i64* %pi) {
+define void @fold_atoll_member_pC(ptr %pi) {
; CHECK-LABEL: @fold_atoll_member_pC(
-; CHECK-NEXT: store i64 1, i64* [[PI:%.*]], align 4
-; CHECK-NEXT: [[PIA0BP1:%.*]] = getelementptr i64, i64* [[PI]], i64 1
-; CHECK-NEXT: store i64 2, i64* [[PIA0BP1]], align 4
-; CHECK-NEXT: [[PIA0CP3:%.*]] = getelementptr i64, i64* [[PI]], i64 2
-; CHECK-NEXT: store i64 89, i64* [[PIA0CP3]], align 4
-; CHECK-NEXT: [[PIA1AP2:%.*]] = getelementptr i64, i64* [[PI]], i64 3
-; CHECK-NEXT: store i64 3, i64* [[PIA1AP2]], align 4
-; CHECK-NEXT: [[PIA1BP3:%.*]] = getelementptr i64, i64* [[PI]], i64 4
-; CHECK-NEXT: store i64 4, i64* [[PIA1BP3]], align 4
-; CHECK-NEXT: [[PIA1CP4:%.*]] = getelementptr i64, i64* [[PI]], i64 5
-; CHECK-NEXT: store i64 0, i64* [[PIA1CP4]], align 4
+; CHECK-NEXT: store i64 1, ptr [[PI:%.*]], align 4
+; CHECK-NEXT: [[PIA0BP1:%.*]] = getelementptr i64, ptr [[PI]], i64 1
+; CHECK-NEXT: store i64 2, ptr [[PIA0BP1]], align 4
+; CHECK-NEXT: [[PIA0CP3:%.*]] = getelementptr i64, ptr [[PI]], i64 2
+; CHECK-NEXT: store i64 89, ptr [[PIA0CP3]], align 4
+; CHECK-NEXT: [[PIA1AP2:%.*]] = getelementptr i64, ptr [[PI]], i64 3
+; CHECK-NEXT: store i64 3, ptr [[PIA1AP2]], align 4
+; CHECK-NEXT: [[PIA1BP3:%.*]] = getelementptr i64, ptr [[PI]], i64 4
+; CHECK-NEXT: store i64 4, ptr [[PIA1BP3]], align 4
+; CHECK-NEXT: [[PIA1CP4:%.*]] = getelementptr i64, ptr [[PI]], i64 5
+; CHECK-NEXT: store i64 0, ptr [[PIA1CP4]], align 4
; CHECK-NEXT: ret void
;
; Fold atoll(a[0].a) to 1.
- %pa0a = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 0
- %ia0a = call i64 @atol(i8* %pa0a)
- %pia0a = getelementptr i64, i64* %pi, i32 0
- store i64 %ia0a, i64* %pia0a
+ %ia0a = call i64 @atol(ptr @a)
+ store i64 %ia0a, ptr %pi
; Fold atoll(a[0].b + 1) to 2.
- %pa0bp1 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 1
- %ia0bp1 = call i64 @atol(i8* %pa0bp1)
- %pia0bp1 = getelementptr i64, i64* %pi, i32 1
- store i64 %ia0bp1, i64* %pia0bp1
+ %pa0bp1 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 1, i64 1
+ %ia0bp1 = call i64 @atol(ptr %pa0bp1)
+ %pia0bp1 = getelementptr i64, ptr %pi, i32 1
+ store i64 %ia0bp1, ptr %pia0bp1
; Fold atoll(a[0].c + 3) to 89.
- %pa0cp3 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 2, i64 3
- %ia0cp3 = call i64 @atol(i8* %pa0cp3)
- %pia0cp3 = getelementptr i64, i64* %pi, i32 2
- store i64 %ia0cp3, i64* %pia0cp3
+ %pa0cp3 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 2, i64 3
+ %ia0cp3 = call i64 @atol(ptr %pa0cp3)
+ %pia0cp3 = getelementptr i64, ptr %pi, i32 2
+ store i64 %ia0cp3, ptr %pia0cp3
; Fold atoll(a[1].a + 2) to 3.
- %pa1ap2 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 2
- %ia1ap2 = call i64 @atol(i8* %pa1ap2)
- %pia1ap2 = getelementptr i64, i64* %pi, i32 3
- store i64 %ia1ap2, i64* %pia1ap2
+ %pa1ap2 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 0, i64 2
+ %ia1ap2 = call i64 @atol(ptr %pa1ap2)
+ %pia1ap2 = getelementptr i64, ptr %pi, i32 3
+ store i64 %ia1ap2, ptr %pia1ap2
; Fold atoll(a[1].b + 3) to 4.
- %pa1bp3 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 3
- %ia1bp3 = call i64 @atol(i8* %pa1bp3)
- %pia1bp3 = getelementptr i64, i64* %pi, i32 4
- store i64 %ia1bp3, i64* %pia1bp3
+ %pa1bp3 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 1, i64 3
+ %ia1bp3 = call i64 @atol(ptr %pa1bp3)
+ %pia1bp3 = getelementptr i64, ptr %pi, i32 4
+ store i64 %ia1bp3, ptr %pia1bp3
; Fold atoll(a[1].c + 4) to 0.
- %pa1cp4 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 2, i64 4
- %ia1cp4 = call i64 @atol(i8* %pa1cp4)
- %pia1cp4 = getelementptr i64, i64* %pi, i32 5
- store i64 %ia1cp4, i64* %pia1cp4
+ %pa1cp4 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 2, i64 4
+ %ia1cp4 = call i64 @atol(ptr %pa1cp4)
+ %pia1cp4 = getelementptr i64, ptr %pi, i32 5
+ store i64 %ia1cp4, ptr %pia1cp4
ret void
}
; Fold strtol(a[I].M + C, 0, 0) for constant I in [0, 1], member M in [a, b, c],
; and C in a valid range to a constant.
-define void @fold_strtol_member_pC(i64* %pi) {
+define void @fold_strtol_member_pC(ptr %pi) {
; CHECK-LABEL: @fold_strtol_member_pC(
-; CHECK-NEXT: store i64 1, i64* [[PI:%.*]], align 4
-; CHECK-NEXT: [[PIA0BP1:%.*]] = getelementptr i64, i64* [[PI]], i64 1
-; CHECK-NEXT: store i64 2, i64* [[PIA0BP1]], align 4
-; CHECK-NEXT: [[PIA0CP3:%.*]] = getelementptr i64, i64* [[PI]], i64 2
-; CHECK-NEXT: store i64 89, i64* [[PIA0CP3]], align 4
-; CHECK-NEXT: [[PIA1AP2:%.*]] = getelementptr i64, i64* [[PI]], i64 3
-; CHECK-NEXT: store i64 3, i64* [[PIA1AP2]], align 4
-; CHECK-NEXT: [[PIA1BP3:%.*]] = getelementptr i64, i64* [[PI]], i64 4
-; CHECK-NEXT: store i64 4, i64* [[PIA1BP3]], align 4
-; CHECK-NEXT: [[PIA1CP4:%.*]] = getelementptr i64, i64* [[PI]], i64 5
-; CHECK-NEXT: store i64 0, i64* [[PIA1CP4]], align 4
+; CHECK-NEXT: store i64 1, ptr [[PI:%.*]], align 4
+; CHECK-NEXT: [[PIA0BP1:%.*]] = getelementptr i64, ptr [[PI]], i64 1
+; CHECK-NEXT: store i64 2, ptr [[PIA0BP1]], align 4
+; CHECK-NEXT: [[PIA0CP3:%.*]] = getelementptr i64, ptr [[PI]], i64 2
+; CHECK-NEXT: store i64 89, ptr [[PIA0CP3]], align 4
+; CHECK-NEXT: [[PIA1AP2:%.*]] = getelementptr i64, ptr [[PI]], i64 3
+; CHECK-NEXT: store i64 3, ptr [[PIA1AP2]], align 4
+; CHECK-NEXT: [[PIA1BP3:%.*]] = getelementptr i64, ptr [[PI]], i64 4
+; CHECK-NEXT: store i64 4, ptr [[PIA1BP3]], align 4
+; CHECK-NEXT: [[PIA1CP4:%.*]] = getelementptr i64, ptr [[PI]], i64 5
+; CHECK-NEXT: store i64 0, ptr [[PIA1CP4]], align 4
; CHECK-NEXT: ret void
;
; Fold strtol(a[0].a, 0, 0) to 1.
- %pa0a = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 0
- %ia0a = call i64 @strtol(i8* %pa0a, i8** null, i32 0)
- %pia0a = getelementptr i64, i64* %pi, i32 0
- store i64 %ia0a, i64* %pia0a
-
-; Fold strtol(a[0].b + 1, 0, 0, i8** null, i32 0) to 2.
- %pa0bp1 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 1
- %ia0bp1 = call i64 @strtol(i8* %pa0bp1, i8** null, i32 0)
- %pia0bp1 = getelementptr i64, i64* %pi, i32 1
- store i64 %ia0bp1, i64* %pia0bp1
-
-; Fold strtol(a[0].c + 3, 0, 0, i8** null, i32 0) to 89.
- %pa0cp3 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 2, i64 3
- %ia0cp3 = call i64 @strtol(i8* %pa0cp3, i8** null, i32 0)
- %pia0cp3 = getelementptr i64, i64* %pi, i32 2
- store i64 %ia0cp3, i64* %pia0cp3
-
-; Fold strtol(a[1].a + 2, 0, 0, i8** null, i32 0) to 3.
- %pa1ap2 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 2
- %ia1ap2 = call i64 @strtol(i8* %pa1ap2, i8** null, i32 0)
- %pia1ap2 = getelementptr i64, i64* %pi, i32 3
- store i64 %ia1ap2, i64* %pia1ap2
-
-; Fold strtol(a[1].b + 3, 0, 0, i8** null, i32 0) to 4.
- %pa1bp3 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 3
- %ia1bp3 = call i64 @strtol(i8* %pa1bp3, i8** null, i32 0)
- %pia1bp3 = getelementptr i64, i64* %pi, i32 4
- store i64 %ia1bp3, i64* %pia1bp3
-
-; Fold strtol(a[1].c + 4, 0, 0, i8** null, i32 0) to 0.
- %pa1cp4 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 2, i64 4
- %ia1cp4 = call i64 @strtol(i8* %pa1cp4, i8** null, i32 0)
- %pia1cp4 = getelementptr i64, i64* %pi, i32 5
- store i64 %ia1cp4, i64* %pia1cp4
+ %ia0a = call i64 @strtol(ptr @a, ptr null, i32 0)
+ store i64 %ia0a, ptr %pi
+
+; Fold strtol(a[0].b + 1, 0, 0) to 2.
+ %pa0bp1 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 1, i64 1
+ %ia0bp1 = call i64 @strtol(ptr %pa0bp1, ptr null, i32 0)
+ %pia0bp1 = getelementptr i64, ptr %pi, i32 1
+ store i64 %ia0bp1, ptr %pia0bp1
+
+; Fold strtol(a[0].c + 3, 0, 0) to 89.
+ %pa0cp3 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 2, i64 3
+ %ia0cp3 = call i64 @strtol(ptr %pa0cp3, ptr null, i32 0)
+ %pia0cp3 = getelementptr i64, ptr %pi, i32 2
+ store i64 %ia0cp3, ptr %pia0cp3
+
+; Fold strtol(a[1].a + 2, 0, 0) to 3.
+ %pa1ap2 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 0, i64 2
+ %ia1ap2 = call i64 @strtol(ptr %pa1ap2, ptr null, i32 0)
+ %pia1ap2 = getelementptr i64, ptr %pi, i32 3
+ store i64 %ia1ap2, ptr %pia1ap2
+
+; Fold strtol(a[1].b + 3, 0, 0) to 4.
+ %pa1bp3 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 1, i64 3
+ %ia1bp3 = call i64 @strtol(ptr %pa1bp3, ptr null, i32 0)
+ %pia1bp3 = getelementptr i64, ptr %pi, i32 4
+ store i64 %ia1bp3, ptr %pia1bp3
+
+; Fold strtol(a[1].c + 4, 0, 0) to 0.
+ %pa1cp4 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 2, i64 4
+ %ia1cp4 = call i64 @strtol(ptr %pa1cp4, ptr null, i32 0)
+ %pia1cp4 = getelementptr i64, ptr %pi, i32 5
+ store i64 %ia1cp4, ptr %pia1cp4
ret void
}
; Fold strtoll(a[I].M + C, 0, 0) for constant I in [0, 1], member M
; in [a, b, c], and C in a valid range to a constant.
-define void @fold_strtoll_member_pC(i64* %pi) {
+define void @fold_strtoll_member_pC(ptr %pi) {
; CHECK-LABEL: @fold_strtoll_member_pC(
-; CHECK-NEXT: store i64 1, i64* [[PI:%.*]], align 4
-; CHECK-NEXT: [[PIA0BP1:%.*]] = getelementptr i64, i64* [[PI]], i64 1
-; CHECK-NEXT: store i64 2, i64* [[PIA0BP1]], align 4
-; CHECK-NEXT: [[PIA0CP3:%.*]] = getelementptr i64, i64* [[PI]], i64 2
-; CHECK-NEXT: store i64 89, i64* [[PIA0CP3]], align 4
-; CHECK-NEXT: [[PIA1AP2:%.*]] = getelementptr i64, i64* [[PI]], i64 3
-; CHECK-NEXT: store i64 3, i64* [[PIA1AP2]], align 4
-; CHECK-NEXT: [[PIA1BP3:%.*]] = getelementptr i64, i64* [[PI]], i64 4
-; CHECK-NEXT: store i64 4, i64* [[PIA1BP3]], align 4
-; CHECK-NEXT: [[PIA1CP4:%.*]] = getelementptr i64, i64* [[PI]], i64 5
-; CHECK-NEXT: store i64 0, i64* [[PIA1CP4]], align 4
+; CHECK-NEXT: store i64 1, ptr [[PI:%.*]], align 4
+; CHECK-NEXT: [[PIA0BP1:%.*]] = getelementptr i64, ptr [[PI]], i64 1
+; CHECK-NEXT: store i64 2, ptr [[PIA0BP1]], align 4
+; CHECK-NEXT: [[PIA0CP3:%.*]] = getelementptr i64, ptr [[PI]], i64 2
+; CHECK-NEXT: store i64 89, ptr [[PIA0CP3]], align 4
+; CHECK-NEXT: [[PIA1AP2:%.*]] = getelementptr i64, ptr [[PI]], i64 3
+; CHECK-NEXT: store i64 3, ptr [[PIA1AP2]], align 4
+; CHECK-NEXT: [[PIA1BP3:%.*]] = getelementptr i64, ptr [[PI]], i64 4
+; CHECK-NEXT: store i64 4, ptr [[PIA1BP3]], align 4
+; CHECK-NEXT: [[PIA1CP4:%.*]] = getelementptr i64, ptr [[PI]], i64 5
+; CHECK-NEXT: store i64 0, ptr [[PIA1CP4]], align 4
; CHECK-NEXT: ret void
;
; Fold strtoll(a[0].a, 0, 0) to 1.
- %pa0a = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 0
- %ia0a = call i64 @strtoll(i8* %pa0a, i8** null, i32 0)
- %pia0a = getelementptr i64, i64* %pi, i32 0
- store i64 %ia0a, i64* %pia0a
-
-; Fold strtoll(a[0].b + 1, 0, 0, i8** null, i32 0) to 2.
- %pa0bp1 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 1
- %ia0bp1 = call i64 @strtoll(i8* %pa0bp1, i8** null, i32 0)
- %pia0bp1 = getelementptr i64, i64* %pi, i32 1
- store i64 %ia0bp1, i64* %pia0bp1
-
-; Fold strtoll(a[0].c + 3, 0, 0, i8** null, i32 0) to 89.
- %pa0cp3 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 2, i64 3
- %ia0cp3 = call i64 @strtoll(i8* %pa0cp3, i8** null, i32 0)
- %pia0cp3 = getelementptr i64, i64* %pi, i32 2
- store i64 %ia0cp3, i64* %pia0cp3
-
-; Fold strtoll(a[1].a + 2, 0, 0, i8** null, i32 0) to 3.
- %pa1ap2 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 2
- %ia1ap2 = call i64 @strtoll(i8* %pa1ap2, i8** null, i32 0)
- %pia1ap2 = getelementptr i64, i64* %pi, i32 3
- store i64 %ia1ap2, i64* %pia1ap2
-
-; Fold strtoll(a[1].b + 3, 0, 0, i8** null, i32 0) to 4.
- %pa1bp3 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 3
- %ia1bp3 = call i64 @strtoll(i8* %pa1bp3, i8** null, i32 0)
- %pia1bp3 = getelementptr i64, i64* %pi, i32 4
- store i64 %ia1bp3, i64* %pia1bp3
-
-; Fold strtoll(a[1].c + 4, 0, 0, i8** null, i32 0) to 0.
- %pa1cp4 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 2, i64 4
- %ia1cp4 = call i64 @strtoll(i8* %pa1cp4, i8** null, i32 0)
- %pia1cp4 = getelementptr i64, i64* %pi, i32 5
- store i64 %ia1cp4, i64* %pia1cp4
+ %ia0a = call i64 @strtoll(ptr @a, ptr null, i32 0)
+ store i64 %ia0a, ptr %pi
+
+; Fold strtoll(a[0].b + 1, 0, 0) to 2.
+ %pa0bp1 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 1, i64 1
+ %ia0bp1 = call i64 @strtoll(ptr %pa0bp1, ptr null, i32 0)
+ %pia0bp1 = getelementptr i64, ptr %pi, i32 1
+ store i64 %ia0bp1, ptr %pia0bp1
+
+; Fold strtoll(a[0].c + 3, 0, 0) to 89.
+ %pa0cp3 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 2, i64 3
+ %ia0cp3 = call i64 @strtoll(ptr %pa0cp3, ptr null, i32 0)
+ %pia0cp3 = getelementptr i64, ptr %pi, i32 2
+ store i64 %ia0cp3, ptr %pia0cp3
+
+; Fold strtoll(a[1].a + 2, 0, 0) to 3.
+ %pa1ap2 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 0, i64 2
+ %ia1ap2 = call i64 @strtoll(ptr %pa1ap2, ptr null, i32 0)
+ %pia1ap2 = getelementptr i64, ptr %pi, i32 3
+ store i64 %ia1ap2, ptr %pia1ap2
+
+; Fold strtoll(a[1].b + 3, 0, 0) to 4.
+ %pa1bp3 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 1, i64 3
+ %ia1bp3 = call i64 @strtoll(ptr %pa1bp3, ptr null, i32 0)
+ %pia1bp3 = getelementptr i64, ptr %pi, i32 4
+ store i64 %ia1bp3, ptr %pia1bp3
+
+; Fold strtoll(a[1].c + 4, 0, 0) to 0.
+ %pa1cp4 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 2, i64 4
+ %ia1cp4 = call i64 @strtoll(ptr %pa1cp4, ptr null, i32 0)
+ %pia1cp4 = getelementptr i64, ptr %pi, i32 5
+ store i64 %ia1cp4, ptr %pia1cp4
ret void
}
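; A rough C-level picture of the folds above (a sketch, not part of the
; original test): each strtoll call parses a constant string whose value
; is known at compile time, so the whole call collapses into a constant
; store. Assuming the usual <stdlib.h> semantics:
;
;   #include <stdlib.h>
;   long long f(void) { return strtoll("89", 0, 0); }   /* folds to 89 */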
; CHECK-NEXT: store ptr [[LLR]], ptr [[PS2]], align 8
; CHECK-NEXT: ret void
;
- %p = getelementptr [2 x i8], [2 x i8]* @a, i32 0, i32 0
- %ir = call ptr @atoi(ptr %p)
- %ps0 = getelementptr ptr, ptr %ps, i32 0
- store ptr %ir, ptr %ps0
+ %ir = call ptr @atoi(ptr @a)
+ store ptr %ir, ptr %ps
- %lr = call ptr @atol(ptr %p)
+ %lr = call ptr @atol(ptr @a)
%ps1 = getelementptr ptr, ptr %ps, i32 1
store ptr %lr, ptr %ps1
- %llr = call ptr @atol(ptr %p)
+ %llr = call ptr @atol(ptr @a)
%ps2 = getelementptr ptr, ptr %ps, i32 2
store ptr %llr, ptr %ps2
; CHECK-NEXT: [[CMP:%.*]] = call ptr @strncasecmp(ptr nonnull @a, ptr getelementptr inbounds ([2 x i8], ptr @a, i64 0, i64 1))
; CHECK-NEXT: ret ptr [[CMP]]
;
- %p0 = getelementptr [2 x i8], [2 x i8]* @a, i32 0, i32 0
- %p1 = getelementptr [2 x i8], [2 x i8]* @a, i32 0, i32 1
- %cmp = call ptr @strncasecmp(ptr %p0, ptr %p1)
+ %p1 = getelementptr [2 x i8], ptr @a, i32 0, i32 1
+ %cmp = call ptr @strncasecmp(ptr @a, ptr %p1)
ret ptr %cmp
}
; CHECK-NEXT: [[I:%.*]] = call i1 @strcoll(ptr nonnull @a, ptr getelementptr inbounds ([2 x i8], ptr @a, i64 0, i64 1), ptr nonnull @a)
; CHECK-NEXT: ret i1 [[I]]
;
- %p0 = getelementptr [2 x i8], [2 x i8]* @a, i32 0, i32 0
- %p1 = getelementptr [2 x i8], [2 x i8]* @a, i32 0, i32 1
- %i = call i1 @strcoll(ptr %p0, ptr %p1, ptr %p0)
+ %p1 = getelementptr [2 x i8], ptr @a, i32 0, i32 1
+ %i = call i1 @strcoll(ptr @a, ptr %p1, ptr @a)
ret i1 %i
}
; CHECK-NEXT: [[D:%.*]] = call ptr @strndup(ptr nonnull @a)
; CHECK-NEXT: ret ptr [[D]]
;
- %p = getelementptr [2 x i8], [2 x i8]* @a, i32 0, i32 0
- %d = call ptr @strndup(ptr %p)
+ %d = call ptr @strndup(ptr @a)
ret ptr %d
}
; CHECK-NEXT: [[RET:%.*]] = call i1 @strtok(ptr nonnull @a, ptr getelementptr inbounds ([2 x i8], ptr @a, i64 0, i64 1), i1 false)
; CHECK-NEXT: ret i1 [[RET]]
;
- %p0 = getelementptr [2 x i8], [2 x i8]* @a, i32 0, i32 0
- %p1 = getelementptr [2 x i8], [2 x i8]* @a, i32 0, i32 1
- %ret = call i1 @strtok(ptr %p0, ptr %p1, i1 0)
+ %p1 = getelementptr [2 x i8], ptr @a, i32 0, i32 1
+ %ret = call i1 @strtok(ptr @a, ptr %p1, i1 0)
ret i1 %ret
}
; CHECK-NEXT: [[RET:%.*]] = call i1 @strtok_r(ptr nonnull @a, ptr getelementptr inbounds ([2 x i8], ptr @a, i64 0, i64 1))
; CHECK-NEXT: ret i1 [[RET]]
;
- %p0 = getelementptr [2 x i8], [2 x i8]* @a, i32 0, i32 0
- %p1 = getelementptr [2 x i8], [2 x i8]* @a, i32 0, i32 1
- %ret = call i1 @strtok_r(ptr %p0, ptr %p1)
+ %p1 = getelementptr [2 x i8], ptr @a, i32 0, i32 1
+ %ret = call i1 @strtok_r(ptr @a, ptr %p1)
ret i1 %ret
}
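; The strtok/strtok_r calls above deliberately use the wrong prototypes;
; for reference, the C declarations (assumed from the C standard and
; POSIX) that a correct call would be checked against are:
;
;   char *strtok(char *s, const char *delim);
;   char *strtok_r(char *s, const char *delim, char **saveptr);
;
; With a mismatched signature, only the constant-GEP argument folding in
; the CHECK lines is expected, never a semantic simplification.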
declare i64 @strtoll(ptr, ptr)
declare i64 @strtoull(ptr, ptr)
-define void @call_bad_strto(i32* %psi32, i64* %psi64) {
+define void @call_bad_strto(ptr %psi32, ptr %psi64) {
; CHECK-LABEL: @call_bad_strto(
; CHECK-NEXT: [[LR:%.*]] = call i32 @strtol(ptr nonnull @a, ptr null)
; CHECK-NEXT: store i32 [[LR]], ptr [[PSI32:%.*]], align 4
; CHECK-NEXT: store i64 [[ULLR]], ptr [[PS3]], align 4
; CHECK-NEXT: ret void
;
- %p = getelementptr [2 x i8], [2 x i8]* @a, i32 0, i32 0
- %lr = call i32 @strtol(ptr %p, ptr null)
- %ps0 = getelementptr i32, i32* %psi32, i32 0
- store i32 %lr, i32* %ps0
+ %lr = call i32 @strtol(ptr @a, ptr null)
+ store i32 %lr, ptr %psi32
- %ulr = call i32 @strtoul(ptr %p, ptr null)
- %ps1 = getelementptr i32, i32* %psi32, i32 1
- store i32 %ulr, i32* %ps1
+ %ulr = call i32 @strtoul(ptr @a, ptr null)
+ %ps1 = getelementptr i32, ptr %psi32, i32 1
+ store i32 %ulr, ptr %ps1
- %llr = call i64 @strtoll(ptr %p, ptr null)
- %ps2 = getelementptr i64, i64* %psi64, i32 0
- store i64 %llr, i64* %ps2
+ %llr = call i64 @strtoll(ptr @a, ptr null)
+ store i64 %llr, ptr %psi64
- %ullr = call i64 @strtoull(ptr %p, ptr null)
- %ps3 = getelementptr i64, i64* %psi64, i32 3
- store i64 %ullr, i64* %ps3
+ %ullr = call i64 @strtoull(ptr @a, ptr null)
+ %ps3 = getelementptr i64, ptr %psi64, i32 3
+ store i64 %ullr, ptr %ps3
ret void
}
; CHECK-NEXT: [[RET:%.*]] = call ptr @strxfrm(ptr nonnull @a, ptr getelementptr inbounds ([2 x i8], ptr @a, i64 0, i64 1))
; CHECK-NEXT: ret ptr [[RET]]
;
- %p0 = getelementptr [2 x i8], [2 x i8]* @a, i32 0, i32 0
- %p1 = getelementptr [2 x i8], [2 x i8]* @a, i32 0, i32 1
- %ret = call ptr @strxfrm(ptr %p0, ptr %p1)
+ %p1 = getelementptr [2 x i8], ptr @a, i32 0, i32 1
+ %ret = call ptr @strxfrm(ptr @a, ptr %p1)
ret ptr %ret
}
@null = constant [1 x i8] zeroinitializer
@null_hello = constant [7 x i8] c"\00hello\00"
-declare i8* @strcat(i8*, i8*)
-declare i32 @puts(i8*)
+declare ptr @strcat(ptr, ptr)
+declare i32 @puts(ptr)
define i32 @main() {
; CHECK-LABEL: @main(
-; CHECK-NOT: call i8* @strcat
+; CHECK-NOT: call ptr @strcat
; CHECK: call i32 @puts
%target = alloca [1024 x i8]
- %arg1 = getelementptr [1024 x i8], [1024 x i8]* %target, i32 0, i32 0
- store i8 0, i8* %arg1
+ store i8 0, ptr %target
; rslt1 = strcat(target, "hello\00")
- %arg2 = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
- %rslt1 = call i8* @strcat(i8* %arg1, i8* %arg2)
+ %rslt1 = call ptr @strcat(ptr %target, ptr @hello)
; rslt2 = strcat(rslt1, "\00")
- %arg3 = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
- %rslt2 = call i8* @strcat(i8* %rslt1, i8* %arg3)
+ %rslt2 = call ptr @strcat(ptr %rslt1, ptr @null)
; rslt3 = strcat(rslt2, "\00hello\00")
- %arg4 = getelementptr [7 x i8], [7 x i8]* @null_hello, i32 0, i32 0
- %rslt3 = call i8* @strcat(i8* %rslt2, i8* %arg4)
+ %rslt3 = call ptr @strcat(ptr %rslt2, ptr @null_hello)
- call i32 @puts( i8* %rslt3 )
+ call i32 @puts( ptr %rslt3 )
ret i32 0
}
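; The chain above corresponds roughly to this C (a reconstruction from
; the rslt comments, not part of the test); strcat returns its first
; argument, so every call appends into the same buffer and only "hello"
; reaches puts:
;
;   #include <stdio.h>
;   #include <string.h>
;   int main(void) {
;     char target[1024] = "";
;     strcat(strcat(strcat(target, "hello"), ""), "\0hello");  /* last two append nothing */
;     puts(target);                                            /* prints "hello" */
;     return 0;
;   }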
@empty = constant [1 x i8] c"\00"
@a = common global [32 x i8] zeroinitializer, align 1
-declare i8* @strcat(i8*, i8*)
+declare ptr @strcat(ptr, ptr)
define void @test_simplify1() {
; CHECK-LABEL: @test_simplify1(
-; CHECK-NOT: call i8* @strcat
+; CHECK-NOT: call ptr @strcat
; CHECK: ret void
- %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
- call i8* @strcat(i8* %dst, i8* %src)
+ call ptr @strcat(ptr @a, ptr @hello)
ret void
}
; CHECK-LABEL: @test_simplify2(
; CHECK-NEXT: ret void
- %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [1 x i8], [1 x i8]* @empty, i32 0, i32 0
- call i8* @strcat(i8* %dst, i8* %src)
+ call ptr @strcat(ptr @a, ptr @empty)
ret void
}
@empty = constant [1 x i8] c"\00"
@a = common global [32 x i8] zeroinitializer, align 1
-; Expected type: i8* @strcat(i8*, i8*).
-declare i16 @strcat(i8*, i8*)
+; Expected type: ptr @strcat(ptr, ptr).
+declare i16 @strcat(ptr, ptr)
define void @test_nosimplify1() {
; CHECK-LABEL: @test_nosimplify1(
; CHECK: call i16 @strcat
; CHECK: ret void
- %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
- call i16 @strcat(i8* %dst, i8* %src)
+ call i16 @strcat(ptr @a, ptr @hello)
ret void
}
@hello = constant [14 x i8] c"hello world\5Cn\00"
@chr = global i8 zeroinitializer
-declare i8 @strchr(i8*, i32)
+declare i8 @strchr(ptr, i32)
define void @test_nosimplify1() {
; CHECK: test_nosimplify1
; CHECK: call i8 @strchr
; CHECK: ret void
- %str = getelementptr [14 x i8], [14 x i8]* @hello, i32 0, i32 0
- %dst = call i8 @strchr(i8* %str, i32 119)
- store i8 %dst, i8* @chr
+ %dst = call i8 @strchr(ptr @hello, i32 119)
+ store i8 %dst, ptr @chr
ret void
}
; Verify that the result of strchr calls used in equality expressions
; with either the first argument or null are optimally folded.
-declare i8* @strchr(i8*, i32)
+declare ptr @strchr(ptr, i32)
; Fold strchr(s, c) == s to *s == c.
-define i1 @fold_strchr_s_c_eq_s(i8* %s, i32 %c) {
+define i1 @fold_strchr_s_c_eq_s(ptr %s, i32 %c) {
; CHECK-LABEL: @fold_strchr_s_c_eq_s(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* [[S:%.*]], align 1
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[S:%.*]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT: [[CHAR0CMP:%.*]] = icmp eq i8 [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret i1 [[CHAR0CMP]]
;
- %p = call i8* @strchr(i8* %s, i32 %c)
- %cmp = icmp eq i8* %p, %s
+ %p = call ptr @strchr(ptr %s, i32 %c)
+ %cmp = icmp eq ptr %p, %s
ret i1 %cmp
}
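; A minimal C sketch of why the fold is sound (assumed libc semantics,
; not test content): strchr returns its first argument exactly when the
; very first character matches, so the pointer equality reduces to a
; single byte comparison:
;
;   #include <string.h>
;   int first_is(const char *s, int c) {
;     return strchr(s, c) == s;   /* same as: *s == (char)c */
;   }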
; Fold strchr(s, c) != s to *s != c.
-define i1 @fold_strchr_s_c_neq_s(i8* %s, i32 %c) {
+define i1 @fold_strchr_s_c_neq_s(ptr %s, i32 %c) {
; CHECK-LABEL: @fold_strchr_s_c_neq_s(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* [[S:%.*]], align 1
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[S:%.*]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT: [[CHAR0CMP:%.*]] = icmp ne i8 [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret i1 [[CHAR0CMP]]
;
- %p = call i8* @strchr(i8* %s, i32 %c)
- %cmp = icmp ne i8* %p, %s
+ %p = call ptr @strchr(ptr %s, i32 %c)
+ %cmp = icmp ne ptr %p, %s
ret i1 %cmp
}
; Fold strchr(s, '\0') == null to false. (A string must be nul-terminated,
; otherwise the call would read past the end of the array.)
-define i1 @fold_strchr_s_nul_eqz(i8* %s) {
+define i1 @fold_strchr_s_nul_eqz(ptr %s) {
; CHECK-LABEL: @fold_strchr_s_nul_eqz(
; CHECK-NEXT: ret i1 false
;
- %p = call i8* @strchr(i8* %s, i32 0)
- %cmp = icmp eq i8* %p, null
+ %p = call ptr @strchr(ptr %s, i32 0)
+ %cmp = icmp eq ptr %p, null
ret i1 %cmp
}
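; In the same C terms (a hypothetical sketch, assuming s is a valid
; nul-terminated string): strchr(s, '\0') points at the terminator
; itself, so it can never compare equal to null:
;
;   #include <string.h>
;   int never(const char *s) { return strchr(s, '\0') == NULL; }  /* always 0 */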
; Fold strchr(s, '\0') != null to true.
-define i1 @fold_strchr_s_nul_nez(i8* %s) {
+define i1 @fold_strchr_s_nul_nez(ptr %s) {
; CHECK-LABEL: @fold_strchr_s_nul_nez(
; CHECK-NEXT: ret i1 true
;
- %p = call i8* @strchr(i8* %s, i32 0)
- %cmp = icmp ne i8* %p, null
+ %p = call ptr @strchr(ptr %s, i32 0)
+ %cmp = icmp ne ptr %p, null
ret i1 %cmp
}
; CHECK-NEXT: [[CHAR0CMP:%.*]] = icmp eq i8 [[TMP1]], 49
; CHECK-NEXT: ret i1 [[CHAR0CMP]]
;
- %p = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 0
- %q = call i8* @strchr(i8* %p, i32 %c)
- %cmp = icmp eq i8* %q, %p
+ %q = call ptr @strchr(ptr @a5, i32 %c)
+ %cmp = icmp eq ptr %q, @a5
ret i1 %cmp
}
@bell = constant [5 x i8] c"bell\00"
@null = constant [1 x i8] zeroinitializer
-declare i32 @strcmp(i8*, i8*)
+declare i32 @strcmp(ptr, ptr)
; strcmp("", x) -> -*x
-define i32 @test1(i8* %str2) {
+define i32 @test1(ptr %str2) {
; CHECK-LABEL: @test1(
-; CHECK: %strcmpload = load i8, i8* %str
+; CHECK: %strcmpload = load i8, ptr %str
; CHECK: %1 = zext i8 %strcmpload to i32
; CHECK: %2 = sub nsw i32 0, %1
; CHECK: ret i32 %2
; NOBCMP-LABEL: @test1(
-; NOBCMP-NEXT: [[STRCMPLOAD:%.*]] = load i8, i8* [[STR2:%.*]], align 1
+; NOBCMP-NEXT: [[STRCMPLOAD:%.*]] = load i8, ptr [[STR2:%.*]], align 1
; NOBCMP-NEXT: [[TMP1:%.*]] = zext i8 [[STRCMPLOAD]] to i32
; NOBCMP-NEXT: [[TMP2:%.*]] = sub nsw i32 0, [[TMP1]]
; NOBCMP-NEXT: ret i32 [[TMP2]]
;
; BCMP-LABEL: @test1(
-; BCMP-NEXT: [[STRCMPLOAD:%.*]] = load i8, i8* [[STR2:%.*]], align 1
+; BCMP-NEXT: [[STRCMPLOAD:%.*]] = load i8, ptr [[STR2:%.*]], align 1
; BCMP-NEXT: [[TMP1:%.*]] = zext i8 [[STRCMPLOAD]] to i32
; BCMP-NEXT: [[TMP2:%.*]] = sub nsw i32 0, [[TMP1]]
; BCMP-NEXT: ret i32 [[TMP2]]
;
- %str1 = getelementptr inbounds [1 x i8], [1 x i8]* @null, i32 0, i32 0
- %temp1 = call i32 @strcmp(i8* %str1, i8* %str2)
+ %temp1 = call i32 @strcmp(ptr @null, ptr %str2)
ret i32 %temp1
}
; strcmp(x, "") -> *x
-define i32 @test2(i8* %str1) {
+define i32 @test2(ptr %str1) {
; CHECK-LABEL: @test2(
-; CHECK: %strcmpload = load i8, i8* %str
+; CHECK: %strcmpload = load i8, ptr %str
; CHECK: %1 = zext i8 %strcmpload to i32
; CHECK: ret i32 %1
; NOBCMP-LABEL: @test2(
-; NOBCMP-NEXT: [[STRCMPLOAD:%.*]] = load i8, i8* [[STR1:%.*]], align 1
+; NOBCMP-NEXT: [[STRCMPLOAD:%.*]] = load i8, ptr [[STR1:%.*]], align 1
; NOBCMP-NEXT: [[TMP1:%.*]] = zext i8 [[STRCMPLOAD]] to i32
; NOBCMP-NEXT: ret i32 [[TMP1]]
;
; BCMP-LABEL: @test2(
-; BCMP-NEXT: [[STRCMPLOAD:%.*]] = load i8, i8* [[STR1:%.*]], align 1
+; BCMP-NEXT: [[STRCMPLOAD:%.*]] = load i8, ptr [[STR1:%.*]], align 1
; BCMP-NEXT: [[TMP1:%.*]] = zext i8 [[STRCMPLOAD]] to i32
; BCMP-NEXT: ret i32 [[TMP1]]
;
- %str2 = getelementptr inbounds [1 x i8], [1 x i8]* @null, i32 0, i32 0
- %temp1 = call i32 @strcmp(i8* %str1, i8* %str2)
+ %temp1 = call i32 @strcmp(ptr %str1, ptr @null)
ret i32 %temp1
}
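; In C terms (a sketch of the assumed libc semantics): a comparison
; against "" only ever inspects the first byte of the other operand,
; which is exactly the load the CHECK lines above materialize:
;
;   #include <string.h>
;   int empty_lhs(const char *x) { return strcmp("", x); }  /* folds to -(unsigned char)*x */
;   int empty_rhs(const char *x) { return strcmp(x, ""); }  /* folds to  (unsigned char)*x */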
; BCMP-LABEL: @test3(
; BCMP-NEXT: ret i32 -1
;
- %str1 = getelementptr inbounds [5 x i8], [5 x i8]* @hell, i32 0, i32 0
- %str2 = getelementptr inbounds [6 x i8], [6 x i8]* @hello, i32 0, i32 0
- %temp1 = call i32 @strcmp(i8* %str1, i8* %str2)
+ %temp1 = call i32 @strcmp(ptr @hell, ptr @hello)
ret i32 %temp1
}
; BCMP-LABEL: @test4(
; BCMP-NEXT: ret i32 1
;
- %str1 = getelementptr inbounds [5 x i8], [5 x i8]* @hell, i32 0, i32 0
- %str2 = getelementptr inbounds [1 x i8], [1 x i8]* @null, i32 0, i32 0
- %temp1 = call i32 @strcmp(i8* %str1, i8* %str2)
+ %temp1 = call i32 @strcmp(ptr @hell, ptr @null)
ret i32 %temp1
}
; (This transform is rather difficult to trigger in a useful manner)
define i32 @test5(i1 %b) {
; CHECK-LABEL: @test5(
-; CHECK: %memcmp = call i32 @memcmp(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @hello, i32 0, i32 0), i8* %str2, i32 5)
+; CHECK: %memcmp = call i32 @memcmp(ptr @hello, ptr %str2, i32 5)
; CHECK: ret i32 %memcmp
; NOBCMP-LABEL: @test5(
-; NOBCMP-NEXT: [[STR2:%.*]] = select i1 [[B:%.*]], i8* getelementptr inbounds ([5 x i8], [5 x i8]* @hell, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @bell, i32 0, i32 0)
-; NOBCMP-NEXT: [[MEMCMP:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(5) getelementptr inbounds ([6 x i8], [6 x i8]* @hello, i32 0, i32 0), i8* noundef nonnull dereferenceable(5) [[STR2]], i32 5)
+; NOBCMP-NEXT: [[STR2:%.*]] = select i1 [[B:%.*]], ptr @hell, ptr @bell
+; NOBCMP-NEXT: [[MEMCMP:%.*]] = call i32 @memcmp(ptr noundef nonnull dereferenceable(5) @hello, ptr noundef nonnull dereferenceable(5) [[STR2]], i32 5)
; NOBCMP-NEXT: ret i32 [[MEMCMP]]
;
; BCMP-LABEL: @test5(
-; BCMP-NEXT: [[STR2:%.*]] = select i1 [[B:%.*]], i8* getelementptr inbounds ([5 x i8], [5 x i8]* @hell, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @bell, i32 0, i32 0)
-; BCMP-NEXT: [[MEMCMP:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(5) getelementptr inbounds ([6 x i8], [6 x i8]* @hello, i32 0, i32 0), i8* noundef nonnull dereferenceable(5) [[STR2]], i32 5)
+; BCMP-NEXT: [[STR2:%.*]] = select i1 [[B:%.*]], ptr @hell, ptr @bell
+; BCMP-NEXT: [[MEMCMP:%.*]] = call i32 @memcmp(ptr noundef nonnull dereferenceable(5) @hello, ptr noundef nonnull dereferenceable(5) [[STR2]], i32 5)
; BCMP-NEXT: ret i32 [[MEMCMP]]
;
- %str1 = getelementptr inbounds [6 x i8], [6 x i8]* @hello, i32 0, i32 0
- %temp1 = getelementptr inbounds [5 x i8], [5 x i8]* @hell, i32 0, i32 0
- %temp2 = getelementptr inbounds [5 x i8], [5 x i8]* @bell, i32 0, i32 0
- %str2 = select i1 %b, i8* %temp1, i8* %temp2
- %temp3 = call i32 @strcmp(i8* %str1, i8* %str2)
+ %str2 = select i1 %b, ptr @hell, ptr @bell
+ %temp3 = call i32 @strcmp(ptr @hello, ptr %str2)
ret i32 %temp3
}
; strcmp(x,x) -> 0
-define i32 @test6(i8* %str) {
+define i32 @test6(ptr %str) {
; CHECK-LABEL: @test6(
; CHECK: ret i32 0
; NOBCMP-LABEL: @test6(
; BCMP-LABEL: @test6(
; BCMP-NEXT: ret i32 0
;
- %temp1 = call i32 @strcmp(i8* %str, i8* %str)
+ %temp1 = call i32 @strcmp(ptr %str, ptr %str)
ret i32 %temp1
}
; strcmp(x, y) == 0 -> bcmp(x, y, <known length>)
define i1 @test7(i1 %b) {
; NOBCMP-LABEL: @test7(
-; NOBCMP-NEXT: [[STR2:%.*]] = select i1 [[B:%.*]], i8* getelementptr inbounds ([5 x i8], [5 x i8]* @hell, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @bell, i32 0, i32 0)
-; NOBCMP-NEXT: [[MEMCMP:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(5) getelementptr inbounds ([6 x i8], [6 x i8]* @hello, i32 0, i32 0), i8* noundef nonnull dereferenceable(5) [[STR2]], i32 5)
+; NOBCMP-NEXT: [[STR2:%.*]] = select i1 [[B:%.*]], ptr @hell, ptr @bell
+; NOBCMP-NEXT: [[MEMCMP:%.*]] = call i32 @memcmp(ptr noundef nonnull dereferenceable(5) @hello, ptr noundef nonnull dereferenceable(5) [[STR2]], i32 5)
; NOBCMP-NEXT: [[RES:%.*]] = icmp eq i32 [[MEMCMP]], 0
; NOBCMP-NEXT: ret i1 [[RES]]
;
; BCMP-LABEL: @test7(
-; BCMP-NEXT: [[STR2:%.*]] = select i1 [[B:%.*]], i8* getelementptr inbounds ([5 x i8], [5 x i8]* @hell, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @bell, i32 0, i32 0)
-; BCMP-NEXT: [[BCMP:%.*]] = call i32 @bcmp(i8* noundef nonnull dereferenceable(5) getelementptr inbounds ([6 x i8], [6 x i8]* @hello, i32 0, i32 0), i8* noundef nonnull dereferenceable(5) [[STR2]], i32 5)
+; BCMP-NEXT: [[STR2:%.*]] = select i1 [[B:%.*]], ptr @hell, ptr @bell
+; BCMP-NEXT: [[BCMP:%.*]] = call i32 @bcmp(ptr noundef nonnull dereferenceable(5) @hello, ptr noundef nonnull dereferenceable(5) [[STR2]], i32 5)
; BCMP-NEXT: [[RES:%.*]] = icmp eq i32 [[BCMP]], 0
; BCMP-NEXT: ret i1 [[RES]]
;
- %str1 = getelementptr inbounds [6 x i8], [6 x i8]* @hello, i32 0, i32 0
- %temp1 = getelementptr inbounds [5 x i8], [5 x i8]* @hell, i32 0, i32 0
- %temp2 = getelementptr inbounds [5 x i8], [5 x i8]* @bell, i32 0, i32 0
- %str2 = select i1 %b, i8* %temp1, i8* %temp2
- %temp3 = call i32 @strcmp(i8* %str1, i8* %str2)
+ %str2 = select i1 %b, ptr @hell, ptr @bell
+ %temp3 = call i32 @strcmp(ptr @hello, ptr %str2)
%res = icmp eq i32 %temp3, 0
ret i1 %res
}
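; Why bcmp is preferred when available (a sketch, assuming a target that
; provides bcmp): only the result's equality with zero is used, so the
; ordered result of memcmp is unneeded and the equality-only bcmp
; suffices:
;
;   #include <string.h>
;   int same5(const char *p, const char *q) {
;     return memcmp(p, q, 5) == 0;   /* equality only: bcmp(p, q, 5) == 0 */
;   }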
@hello = constant [6 x i8] c"hello\00"
@hell = constant [5 x i8] c"hell\00"
-declare i16 @strcmp(i8*, i8*)
+declare i16 @strcmp(ptr, ptr)
define i16 @test_nosimplify() {
; CHECK-LABEL: @test_nosimplify(
; CHECK: call i16 @strcmp
; CHECK: ret i16 %temp1
- %str1 = getelementptr inbounds [5 x i8], [5 x i8]* @hell, i32 0, i32 0
- %str2 = getelementptr inbounds [6 x i8], [6 x i8]* @hello, i32 0, i32 0
- %temp1 = call i16 @strcmp(i8* %str1, i8* %str2)
+ %temp1 = call i16 @strcmp(ptr @hell, ptr @hello)
ret i16 %temp1
}
;
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-declare i32 @strcmp(i8*, i8*)
+declare i32 @strcmp(ptr, ptr)
@a5 = constant [5 x [4 x i8]] [[4 x i8] c"123\00", [4 x i8] c"123\00", [4 x i8] c"12\00\00", [4 x i8] zeroinitializer, [4 x i8] zeroinitializer]
; CHECK-LABEL: @fold_strcmp_a5i0_a5i1_to_0(
; CHECK-NEXT: ret i32 0
;
- %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0
- %q = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 1, i64 0
+ %q = getelementptr [5 x [4 x i8]], ptr @a5, i64 0, i64 1, i64 0
- %cmp = call i32 @strcmp(i8* %p, i8* %q)
+ %cmp = call i32 @strcmp(ptr @a5, ptr %q)
ret i32 %cmp
}
define i32 @call_strcmp_a5i0_a5iI(i64 %I) {
; CHECK-LABEL: @call_strcmp_a5i0_a5iI(
-; CHECK-NEXT: [[Q:%.*]] = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 [[I:%.*]], i64 0
-; CHECK-NEXT: [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(4) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0), i8* noundef nonnull dereferenceable(1) [[Q]])
+; CHECK-NEXT: [[Q:%.*]] = getelementptr [5 x [4 x i8]], ptr @a5, i64 0, i64 [[I:%.*]], i64 0
+; CHECK-NEXT: [[CMP:%.*]] = call i32 @strcmp(ptr noundef nonnull dereferenceable(4) @a5, ptr noundef nonnull dereferenceable(1) [[Q]])
; CHECK-NEXT: ret i32 [[CMP]]
;
- %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0
- %q = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 %I, i64 0
+ %q = getelementptr [5 x [4 x i8]], ptr @a5, i64 0, i64 %I, i64 0
- %cmp = call i32 @strcmp(i8* %p, i8* %q)
+ %cmp = call i32 @strcmp(ptr @a5, ptr %q)
ret i32 %cmp
}
define i32 @call_strcmp_a5iI_a5i0(i64 %I) {
; CHECK-LABEL: @call_strcmp_a5iI_a5i0(
-; CHECK-NEXT: [[P:%.*]] = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 [[I:%.*]], i64 0
-; CHECK-NEXT: [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(1) [[P]], i8* noundef nonnull dereferenceable(4) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0))
+; CHECK-NEXT: [[P:%.*]] = getelementptr [5 x [4 x i8]], ptr @a5, i64 0, i64 [[I:%.*]], i64 0
+; CHECK-NEXT: [[CMP:%.*]] = call i32 @strcmp(ptr noundef nonnull dereferenceable(1) [[P]], ptr noundef nonnull dereferenceable(4) @a5)
; CHECK-NEXT: ret i32 [[CMP]]
;
- %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 %I, i64 0
- %q = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0
+ %p = getelementptr [5 x [4 x i8]], ptr @a5, i64 0, i64 %I, i64 0
- %cmp = call i32 @strcmp(i8* %p, i8* %q)
+ %cmp = call i32 @strcmp(ptr %p, ptr @a5)
ret i32 %cmp
}
; CHECK-LABEL: @fold_strcmp_a5i0_a5i1_p1_to_0(
; CHECK-NEXT: ret i32 -1
;
- %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0
- %q = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 1, i64 1
+ %q = getelementptr [5 x [4 x i8]], ptr @a5, i64 0, i64 1, i64 1
- %cmp = call i32 @strcmp(i8* %p, i8* %q)
+ %cmp = call i32 @strcmp(ptr @a5, ptr %q)
ret i32 %cmp
}
define i32 @call_strcmp_a5i0_a5i1_pI(i64 %I) {
; CHECK-LABEL: @call_strcmp_a5i0_a5i1_pI(
-; CHECK-NEXT: [[Q:%.*]] = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 1, i64 [[I:%.*]]
-; CHECK-NEXT: [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(4) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0), i8* noundef nonnull dereferenceable(1) [[Q]])
+; CHECK-NEXT: [[Q:%.*]] = getelementptr [5 x [4 x i8]], ptr @a5, i64 0, i64 1, i64 [[I:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = call i32 @strcmp(ptr noundef nonnull dereferenceable(4) @a5, ptr noundef nonnull dereferenceable(1) [[Q]])
; CHECK-NEXT: ret i32 [[CMP]]
;
- %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0
- %q = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 1, i64 %I
+ %q = getelementptr [5 x [4 x i8]], ptr @a5, i64 0, i64 1, i64 %I
- %cmp = call i32 @strcmp(i8* %p, i8* %q)
+ %cmp = call i32 @strcmp(ptr @a5, ptr %q)
ret i32 %cmp
}
; CHECK-LABEL: @fold_strcmp_a5i0_p1_a5i1_to_0(
; CHECK-NEXT: ret i32 1
;
- %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 1
- %q = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 1, i64 0
+ %p = getelementptr [5 x [4 x i8]], ptr @a5, i64 0, i64 0, i64 1
+ %q = getelementptr [5 x [4 x i8]], ptr @a5, i64 0, i64 1, i64 0
- %cmp = call i32 @strcmp(i8* %p, i8* %q)
+ %cmp = call i32 @strcmp(ptr %p, ptr %q)
ret i32 %cmp
}
; CHECK-LABEL: @fold_strcmp_a5i0_a5i2_to_0(
; CHECK-NEXT: ret i32 1
;
- %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0
- %q = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 2, i64 0
+ %q = getelementptr [5 x [4 x i8]], ptr @a5, i64 0, i64 2, i64 0
- %cmp = call i32 @strcmp(i8* %p, i8* %q)
+ %cmp = call i32 @strcmp(ptr @a5, ptr %q)
ret i32 %cmp
}
; CHECK-LABEL: @fold_strcmp_a5i2_a5i0_to_m1(
; CHECK-NEXT: ret i32 -1
;
- %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0
- %q = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 2, i64 0
+ %q = getelementptr [5 x [4 x i8]], ptr @a5, i64 0, i64 2, i64 0
- %cmp = call i32 @strcmp(i8* %q, i8* %p)
+ %cmp = call i32 @strcmp(ptr %q, ptr @a5)
ret i32 %cmp
}
; TODO: Test that ...
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-declare i32 @strcmp(i8*, i8*)
+declare i32 @strcmp(ptr, ptr)
@s9 = constant [10 x i8] c"123456789\00"
define i32 @fold_strcmp_s3_x_s4_s3(i1 %C) {
; CHECK-LABEL: @fold_strcmp_s3_x_s4_s3(
-; CHECK-NEXT: [[PTR:%.*]] = select i1 [[C:%.*]], i8* getelementptr inbounds ([10 x i8], [10 x i8]* @s9, i64 0, i64 6), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @s9, i64 0, i64 5)
-; CHECK-NEXT: [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(1) [[PTR]], i8* noundef nonnull dereferenceable(4) getelementptr inbounds ([10 x i8], [10 x i8]* @s9, i64 0, i64 6))
+; CHECK-NEXT: [[PTR:%.*]] = select i1 [[C:%.*]], ptr getelementptr inbounds ([10 x i8], ptr @s9, i64 0, i64 6), ptr getelementptr inbounds ([10 x i8], ptr @s9, i64 0, i64 5)
+; CHECK-NEXT: [[CMP:%.*]] = call i32 @strcmp(ptr noundef nonnull dereferenceable(1) [[PTR]], ptr noundef nonnull dereferenceable(4) getelementptr inbounds ([10 x i8], ptr @s9, i64 0, i64 6))
; CHECK-NEXT: ret i32 [[CMP]]
;
- %ps3 = getelementptr [10 x i8], [10 x i8]* @s9, i64 0, i64 6
- %ps4 = getelementptr [10 x i8], [10 x i8]* @s9, i64 0, i64 5
+ %ps3 = getelementptr [10 x i8], ptr @s9, i64 0, i64 6
+ %ps4 = getelementptr [10 x i8], ptr @s9, i64 0, i64 5
- %ptr = select i1 %C, i8* %ps3, i8* %ps4
- %cmp = call i32 @strcmp(i8* %ptr, i8* %ps3)
+ %ptr = select i1 %C, ptr %ps3, ptr %ps4
+ %cmp = call i32 @strcmp(ptr %ptr, ptr %ps3)
ret i32 %cmp
}
@hello = constant [6 x i8] c"hello\00"
@a = common global [32 x i8] zeroinitializer, align 1
-; Expected type: i8* @strcpy(i8*, i8*)
-declare i16 @strcpy(i8*, i8*)
+; Expected type: ptr @strcpy(ptr, ptr)
+declare i16 @strcpy(ptr, ptr)
define void @test_no_simplify1() {
; CHECK-LABEL: @test_no_simplify1(
- %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
- call i16 @strcpy(i8* %dst, i8* %src)
+ call i16 @strcpy(ptr @a, ptr @hello)
; CHECK: call i16 @strcpy
ret void
}
@str = private unnamed_addr addrspace(200) constant [17 x i8] c"exactly 16 chars\00", align 1
-declare i8 addrspace(200)* @strcpy(i8 addrspace(200)*, i8 addrspace(200)*) addrspace(200)
-declare i8 addrspace(200)* @stpcpy(i8 addrspace(200)*, i8 addrspace(200)*) addrspace(200)
-declare i8 addrspace(200)* @strncpy(i8 addrspace(200)*, i8 addrspace(200)*, i64) addrspace(200)
-declare i8 addrspace(200)* @stpncpy(i8 addrspace(200)*, i8 addrspace(200)*, i64) addrspace(200)
+declare ptr addrspace(200) @strcpy(ptr addrspace(200), ptr addrspace(200)) addrspace(200)
+declare ptr addrspace(200) @stpcpy(ptr addrspace(200), ptr addrspace(200)) addrspace(200)
+declare ptr addrspace(200) @strncpy(ptr addrspace(200), ptr addrspace(200), i64) addrspace(200)
+declare ptr addrspace(200) @stpncpy(ptr addrspace(200), ptr addrspace(200), i64) addrspace(200)
-define void @test_strcpy_to_memcpy(i8 addrspace(200)* %dst) addrspace(200) nounwind {
+define void @test_strcpy_to_memcpy(ptr addrspace(200) %dst) addrspace(200) nounwind {
; CHECK-LABEL: define {{[^@]+}}@test_strcpy_to_memcpy
-; CHECK-SAME: (i8 addrspace(200)* [[DST:%.*]]) addrspace(200) #[[ATTR1:[0-9]+]] {
+; CHECK-SAME: (ptr addrspace(200) [[DST:%.*]]) addrspace(200) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: call addrspace(200) void @llvm.memcpy.p200i8.p200i8.i64(i8 addrspace(200)* noundef align 1 dereferenceable(17) [[DST]], i8 addrspace(200)* noundef align 1 dereferenceable(17) getelementptr inbounds ([17 x i8], [17 x i8] addrspace(200)* @str, i64 0, i64 0), i64 17, i1 false)
+; CHECK-NEXT: call addrspace(200) void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) noundef align 1 dereferenceable(17) [[DST]], ptr addrspace(200) noundef align 1 dereferenceable(17) @str, i64 17, i1 false)
; CHECK-NEXT: ret void
;
entry:
- %call = call i8 addrspace(200)* @strcpy(i8 addrspace(200)* %dst, i8 addrspace(200)* getelementptr inbounds ([17 x i8], [17 x i8] addrspace(200)* @str, i64 0, i64 0))
+ %call = call ptr addrspace(200) @strcpy(ptr addrspace(200) %dst, ptr addrspace(200) @str)
ret void
}
-define void @test_stpcpy_to_memcpy(i8 addrspace(200)* %dst) addrspace(200) nounwind {
+define void @test_stpcpy_to_memcpy(ptr addrspace(200) %dst) addrspace(200) nounwind {
; CHECK-LABEL: define {{[^@]+}}@test_stpcpy_to_memcpy
-; CHECK-SAME: (i8 addrspace(200)* [[DST:%.*]]) addrspace(200) #[[ATTR1]] {
+; CHECK-SAME: (ptr addrspace(200) [[DST:%.*]]) addrspace(200) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: call addrspace(200) void @llvm.memcpy.p200i8.p200i8.i64(i8 addrspace(200)* noundef align 1 dereferenceable(17) [[DST]], i8 addrspace(200)* noundef align 1 dereferenceable(17) getelementptr inbounds ([17 x i8], [17 x i8] addrspace(200)* @str, i64 0, i64 0), i64 17, i1 false)
+; CHECK-NEXT: call addrspace(200) void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) noundef align 1 dereferenceable(17) [[DST]], ptr addrspace(200) noundef align 1 dereferenceable(17) @str, i64 17, i1 false)
; CHECK-NEXT: ret void
;
entry:
- %call = call i8 addrspace(200)* @stpcpy(i8 addrspace(200)* %dst, i8 addrspace(200)* getelementptr inbounds ([17 x i8], [17 x i8] addrspace(200)* @str, i64 0, i64 0))
+ %call = call ptr addrspace(200) @stpcpy(ptr addrspace(200) %dst, ptr addrspace(200) @str)
ret void
}
-define void @test_stpcpy_to_strcpy(i8 addrspace(200)* %dst, i8 addrspace(200)* %src) addrspace(200) nounwind {
+define void @test_stpcpy_to_strcpy(ptr addrspace(200) %dst, ptr addrspace(200) %src) addrspace(200) nounwind {
; CHECK-LABEL: define {{[^@]+}}@test_stpcpy_to_strcpy
-; CHECK-SAME: (i8 addrspace(200)* [[DST:%.*]], i8 addrspace(200)* [[SRC:%.*]]) addrspace(200) #[[ATTR1]] {
+; CHECK-SAME: (ptr addrspace(200) [[DST:%.*]], ptr addrspace(200) [[SRC:%.*]]) addrspace(200) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[STRCPY:%.*]] = call addrspace(200) i8 addrspace(200)* @strcpy(i8 addrspace(200)* noundef [[DST]], i8 addrspace(200)* noundef [[SRC]])
+; CHECK-NEXT: [[STRCPY:%.*]] = call addrspace(200) ptr addrspace(200) @strcpy(ptr addrspace(200) noundef [[DST]], ptr addrspace(200) noundef [[SRC]])
; CHECK-NEXT: ret void
;
entry:
- %call = call i8 addrspace(200)* @stpcpy(i8 addrspace(200)* %dst, i8 addrspace(200)* %src)
+ %call = call ptr addrspace(200) @stpcpy(ptr addrspace(200) %dst, ptr addrspace(200) %src)
ret void
}
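; The stpcpy -> strcpy rewrite above is valid because the call result is
; unused; the two functions differ only in what they return (a sketch,
; assuming POSIX semantics):
;
;   #include <string.h>
;   void copy(char *dst, const char *src) {
;     stpcpy(dst, src);   /* returns dst + strlen(src); ignored here,
;                            so strcpy, which returns dst, is equivalent */
;   }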
-define void @test_strncpy_to_memcpy(i8 addrspace(200)* %dst) addrspace(200) nounwind {
+define void @test_strncpy_to_memcpy(ptr addrspace(200) %dst) addrspace(200) nounwind {
; CHECK-LABEL: define {{[^@]+}}@test_strncpy_to_memcpy
-; CHECK-SAME: (i8 addrspace(200)* [[DST:%.*]]) addrspace(200) #[[ATTR1]] {
+; CHECK-SAME: (ptr addrspace(200) [[DST:%.*]]) addrspace(200) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: call addrspace(200) void @llvm.memcpy.p200i8.p200i8.i128(i8 addrspace(200)* noundef align 1 dereferenceable(17) [[DST]], i8 addrspace(200)* noundef align 1 dereferenceable(17) getelementptr inbounds ([17 x i8], [17 x i8] addrspace(200)* @str, i64 0, i64 0), i128 17, i1 false)
+; CHECK-NEXT: call addrspace(200) void @llvm.memcpy.p200.p200.i128(ptr addrspace(200) noundef align 1 dereferenceable(17) [[DST]], ptr addrspace(200) noundef align 1 dereferenceable(17) @str, i128 17, i1 false)
; CHECK-NEXT: ret void
;
entry:
- %call = call i8 addrspace(200)* @strncpy(i8 addrspace(200)* %dst, i8 addrspace(200)* getelementptr inbounds ([17 x i8], [17 x i8] addrspace(200)* @str, i64 0, i64 0), i64 17)
+ %call = call ptr addrspace(200) @strncpy(ptr addrspace(200) %dst, ptr addrspace(200) @str, i64 17)
ret void
}
-define void @test_stpncpy_to_memcpy(i8 addrspace(200)* %dst) addrspace(200) nounwind {
+define void @test_stpncpy_to_memcpy(ptr addrspace(200) %dst) addrspace(200) nounwind {
; CHECK-LABEL: define {{[^@]+}}@test_stpncpy_to_memcpy
-; CHECK-SAME: (i8 addrspace(200)* [[DST:%.*]]) addrspace(200) #[[ATTR1]] {
+; CHECK-SAME: (ptr addrspace(200) [[DST:%.*]]) addrspace(200) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: call addrspace(200) void @llvm.memcpy.p200i8.p200i8.i128(i8 addrspace(200)* noundef align 1 dereferenceable(17) [[DST]], i8 addrspace(200)* noundef align 1 dereferenceable(17) getelementptr inbounds ([17 x i8], [17 x i8] addrspace(200)* @str, i64 0, i64 0), i128 17, i1 false)
+; CHECK-NEXT: call addrspace(200) void @llvm.memcpy.p200.p200.i128(ptr addrspace(200) noundef align 1 dereferenceable(17) [[DST]], ptr addrspace(200) noundef align 1 dereferenceable(17) @str, i128 17, i1 false)
; CHECK-NEXT: ret void
;
entry:
- %call = call i8 addrspace(200)* @stpncpy(i8 addrspace(200)* %dst, i8 addrspace(200)* getelementptr inbounds ([17 x i8], [17 x i8] addrspace(200)* @str, i64 0, i64 0), i64 17)
+ %call = call ptr addrspace(200) @stpncpy(ptr addrspace(200) %dst, ptr addrspace(200) @str, i64 17)
ret void
}
define void @test_no_simplify() {
; CHECK-LABEL: @test_no_simplify(
- %dst = getelementptr inbounds [60 x i16], [60 x i16]* @a, i32 0, i32 0
- %src = getelementptr inbounds [8 x i8], [8 x i8]* @.str, i32 0, i32 0
; CHECK-NEXT: call i16 @__strcpy_chk
- call i16 @__strcpy_chk(i16* %dst, i8* %src, i32 8)
+ call i16 @__strcpy_chk(ptr @a, ptr @.str, i32 8)
ret void
}
-declare i16 @__strcpy_chk(i16*, i8*, i32)
+declare i16 @__strcpy_chk(ptr, ptr, i32)
@abc = constant [4 x i8] c"abc\00"
@null = constant [1 x i8] zeroinitializer
-declare i64 @strcspn(i8*, i8*)
+declare i64 @strcspn(ptr, ptr)
; Check strcspn(s, "") -> strlen(s).
-define i64 @test_simplify1(i8* %str) {
+define i64 @test_simplify1(ptr %str) {
; CHECK-LABEL: @test_simplify1(
-; CHECK-NEXT: [[STRLEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) [[STR:%.*]])
+; CHECK-NEXT: [[STRLEN:%.*]] = call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[STR:%.*]])
; CHECK-NEXT: ret i64 [[STRLEN]]
;
- %pat = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
- %ret = call i64 @strcspn(i8* %str, i8* %pat)
+ %ret = call i64 @strcspn(ptr %str, ptr @null)
ret i64 %ret
}
; Check strcspn("", s) -> 0.
-define i64 @test_simplify2(i8* %pat) {
+define i64 @test_simplify2(ptr %pat) {
; CHECK-LABEL: @test_simplify2(
; CHECK-NEXT: ret i64 0
;
- %str = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
- %ret = call i64 @strcspn(i8* %str, i8* %pat)
+ %ret = call i64 @strcspn(ptr @null, ptr %pat)
ret i64 %ret
}
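; In C terms (assumed semantics, not test content): strcspn counts the
; leading run of characters absent from the reject set, so an empty
; reject set scans the whole string and an empty string yields zero:
;
;   #include <string.h>
;   size_t no_reject(const char *s) { return strcspn(s, ""); }  /* == strlen(s) */
;   size_t empty_str(const char *p) { return strcspn("", p); }  /* == 0 */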
; CHECK-LABEL: @test_simplify3(
; CHECK-NEXT: ret i64 0
;
- %str = getelementptr [6 x i8], [6 x i8]* @abcba, i32 0, i32 0
- %pat = getelementptr [4 x i8], [4 x i8]* @abc, i32 0, i32 0
- %ret = call i64 @strcspn(i8* %str, i8* %pat)
+ %ret = call i64 @strcspn(ptr @abcba, ptr @abc)
ret i64 %ret
}
; Check cases that shouldn't be simplified.
-define i64 @test_no_simplify1(i8* %str, i8* %pat) {
+define i64 @test_no_simplify1(ptr %str, ptr %pat) {
; CHECK-LABEL: @test_no_simplify1(
-; CHECK-NEXT: [[RET:%.*]] = call i64 @strcspn(i8* [[STR:%.*]], i8* [[PAT:%.*]])
+; CHECK-NEXT: [[RET:%.*]] = call i64 @strcspn(ptr [[STR:%.*]], ptr [[PAT:%.*]])
; CHECK-NEXT: ret i64 [[RET]]
;
- %ret = call i64 @strcspn(i8* %str, i8* %pat)
+ %ret = call i64 @strcspn(ptr %str, ptr %pat)
ret i64 %ret
}
@null = constant [1 x i8] zeroinitializer
-declare double @strcspn(i8*, i8*)
+declare double @strcspn(ptr, ptr)
; Check that strcspn functions with the wrong prototype aren't simplified.
-define double @test_no_simplify1(i8* %pat) {
+define double @test_no_simplify1(ptr %pat) {
; CHECK-LABEL: @test_no_simplify1(
- %str = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
- %ret = call double @strcspn(i8* %str, i8* %pat)
+ %ret = call double @strcspn(ptr @null, ptr %pat)
; CHECK-NEXT: call double @strcspn
ret double %ret
; CHECK-NEXT: ret double %ret
; This would crash - PR50836
-define i64 @strlen(i32* %s) {
+define i64 @strlen(ptr %s) {
; CHECK-LABEL: @strlen(
-; CHECK-NEXT: [[R:%.*]] = call i64 @strlen(i32* noundef nonnull dereferenceable(1) [[S:%.*]])
+; CHECK-NEXT: [[R:%.*]] = call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[S:%.*]])
; CHECK-NEXT: ret i64 0
;
- %r = call i64 @strlen(i32* %s)
+ %r = call i64 @strlen(ptr %s)
ret i64 0
}
;
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-declare i64 @strlen(i8*)
+declare i64 @strlen(ptr)
@a5_4 = constant [5 x [4 x i8]] [[4 x i8] c"123\00", [4 x i8] c"12\00\00", [4 x i8] c"1\00\00\00", [4 x i8] zeroinitializer, [4 x i8] zeroinitializer]
; CHECK-LABEL: @fold_a5_4_i0_to_3(
; CHECK-NEXT: ret i64 3
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 0, i64 0
- %len = call i64 @strlen(i8* %ptr)
+ %len = call i64 @strlen(ptr @a5_4)
ret i64 %len
}
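; The folds in this batch all have the same shape: the GEP picks a
; compile-time offset into the constant @a5_4 initializer, and the
; distance to the next nul from that offset is likewise known. A
; hypothetical C analogue:
;
;   #include <string.h>
;   static const char a5_4[5][4] = { "123", "12", "1", "", "" };
;   size_t n(void) { return strlen(a5_4[0]); }   /* folds to 3 */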
; CHECK-LABEL: @fold_a5_4_i0_p1_to_2(
; CHECK-NEXT: ret i64 2
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 0, i64 1
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 0, i64 1
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
; CHECK-LABEL: @fold_a5_4_i0_p2_to_1(
; CHECK-NEXT: ret i64 1
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 0, i64 2
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 0, i64 2
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
; CHECK-LABEL: @fold_a5_4_i0_p3_to_0(
; CHECK-NEXT: ret i64 0
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 0, i64 3
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 0, i64 3
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
; CHECK-LABEL: @fold_a5_4_i1_to_2(
; CHECK-NEXT: ret i64 2
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 1, i64 0
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 1, i64 0
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
; CHECK-LABEL: @fold_a5_4_i1_p1_to_1(
; CHECK-NEXT: ret i64 1
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 1, i64 1
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 1, i64 1
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
; CHECK-LABEL: @fold_a5_4_i1_p2_to_0(
; CHECK-NEXT: ret i64 0
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 1, i64 2
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 1, i64 2
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
; CHECK-LABEL: @fold_a5_4_i1_p3_to_0(
; CHECK-NEXT: ret i64 0
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 1, i64 3
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 1, i64 3
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
; CHECK-LABEL: @fold_a5_4_i2_to_1(
; CHECK-NEXT: ret i64 1
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 0
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 2, i64 0
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
; CHECK-LABEL: @fold_a5_4_i2_p1_to_0(
; CHECK-NEXT: ret i64 0
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 1
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 2, i64 1
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
; CHECK-LABEL: @fold_a5_4_i2_p2_to_0(
; CHECK-NEXT: ret i64 0
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 2
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 2, i64 2
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
; CHECK-LABEL: @fold_a5_4_i2_p3_to_0(
; CHECK-NEXT: ret i64 0
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 3
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 2, i64 3
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
; CHECK-LABEL: @fold_a5_4_i3_to_0(
; CHECK-NEXT: ret i64 0
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 3, i64 0
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 3, i64 0
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
; CHECK-LABEL: @fold_a5_4_i3_p1_to_0(
; CHECK-NEXT: ret i64 0
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 3, i64 1
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 3, i64 1
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
; CHECK-LABEL: @fold_a5_4_i3_p2_to_0(
; CHECK-NEXT: ret i64 0
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 3, i64 2
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 3, i64 2
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
; CHECK-LABEL: @fold_a5_3_i4_p3_to_0(
; CHECK-NEXT: ret i64 0
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 3, i64 3
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 3, i64 3
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
; CHECK-LABEL: @fold_a5_4_i4_to_0(
; CHECK-NEXT: ret i64 0
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 4, i64 0
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 4, i64 0
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
; CHECK-LABEL: @fold_a5_4_i4_p1_to_0(
; CHECK-NEXT: ret i64 0
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 4, i64 1
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 4, i64 1
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
; CHECK-LABEL: @fold_a5_4_i4_p2_to_0(
; CHECK-NEXT: ret i64 0
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 4, i64 2
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 4, i64 2
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
; CHECK-LABEL: @fold_a5_4_i4_p3_to_0(
; CHECK-NEXT: ret i64 0
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 4, i64 3
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 4, i64 3
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
@ax = external global [0 x i64]
-declare i64 @strlen(i8*)
+declare i64 @strlen(ptr)
; Fold strlen(a_s3.a) to 3.
; CHECK-LABEL: @fold_strlen_a_S3_to_3(
; CHECK-NEXT: ret i64 3
;
- %ptr = getelementptr %struct.A_a4, %struct.A_a4* @a_s3, i32 0, i32 0, i32 0
- %len = call i64 @strlen(i8* %ptr)
+ %len = call i64 @strlen(ptr @a_s3)
ret i64 %len
}
; CHECK-LABEL: @fold_strlen_a_S3_p1_to_2(
; CHECK-NEXT: ret i64 2
;
- %ptr = getelementptr %struct.A_a4, %struct.A_a4* @a_s3, i32 0, i32 0, i32 1
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr %struct.A_a4, ptr @a_s3, i32 0, i32 0, i32 1
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
; CHECK-LABEL: @fold_strlen_a_S3_p2_to_1(
; CHECK-NEXT: ret i64 1
;
- %ptr = getelementptr %struct.A_a4, %struct.A_a4* @a_s3, i32 0, i32 0, i32 2
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr %struct.A_a4, ptr @a_s3, i32 0, i32 0, i32 2
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
; CHECK-LABEL: @fold_strlen_a_S3_p3_to_0(
; CHECK-NEXT: ret i64 0
;
- %ptr = getelementptr %struct.A_a4, %struct.A_a4* @a_s3, i32 0, i32 0, i32 3
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr %struct.A_a4, ptr @a_s3, i32 0, i32 0, i32 3
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
; CHECK-LABEL: @fold_strlen_a_S3_s4_to_3(
; CHECK-NEXT: ret i64 3
;
- %ptr = getelementptr %struct.A_a4_a5, %struct.A_a4_a5* @a_s3_s4, i32 0, i32 0, i32 0
- %len = call i64 @strlen(i8* %ptr)
+ %len = call i64 @strlen(ptr @a_s3_s4)
ret i64 %len
}
; CHECK-LABEL: @fold_strlen_a_S3_p2_s4_to_1(
; CHECK-NEXT: ret i64 1
;
- %ptr = getelementptr %struct.A_a4_a5, %struct.A_a4_a5* @a_s3_s4, i32 0, i32 0, i32 2
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr %struct.A_a4_a5, ptr @a_s3_s4, i32 0, i32 0, i32 2
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
define void @fold_strlen_a_s3_S4_to_4() {
; CHECK-LABEL: @fold_strlen_a_s3_S4_to_4(
-; CHECK-NEXT: store i64 4, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT: store i64 4, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT: store i64 4, ptr @ax, align 4
+; CHECK-NEXT: store i64 4, ptr getelementptr inbounds ([0 x i64], ptr @ax, i64 0, i64 1), align 4
; CHECK-NEXT: ret void
;
- %p1 = getelementptr %struct.A_a4_a5, %struct.A_a4_a5* @a_s3_s4, i32 0, i32 0, i32 4
- %len1 = call i64 @strlen(i8* %p1)
- %pax0 = getelementptr inbounds [0 x i64], [0 x i64]* @ax, i64 0, i64 0
- store i64 %len1, i64* %pax0
+ %p1 = getelementptr %struct.A_a4_a5, ptr @a_s3_s4, i32 0, i32 0, i32 4
+ %len1 = call i64 @strlen(ptr %p1)
+ store i64 %len1, ptr @ax
- %p2 = getelementptr %struct.A_a4_a5, %struct.A_a4_a5* @a_s3_s4, i32 0, i32 1, i32 0
- %len2 = call i64 @strlen(i8* %p2)
- %pax1 = getelementptr inbounds [0 x i64], [0 x i64]* @ax, i64 0, i64 1
- store i64 %len1, i64* %pax1
+ %p2 = getelementptr %struct.A_a4_a5, ptr @a_s3_s4, i32 0, i32 1, i32 0
+ %len2 = call i64 @strlen(ptr %p2)
+ %pax1 = getelementptr inbounds [0 x i64], ptr @ax, i64 0, i64 1
+ store i64 %len1, ptr %pax1
ret void
}
define void @fold_strlen_a_s3_S4_p1_to_3() {
; CHECK-LABEL: @fold_strlen_a_s3_S4_p1_to_3(
-; CHECK-NEXT: store i64 3, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT: store i64 3, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT: store i64 3, ptr @ax, align 4
+; CHECK-NEXT: store i64 3, ptr getelementptr inbounds ([0 x i64], ptr @ax, i64 0, i64 1), align 4
; CHECK-NEXT: ret void
;
- %p1 = getelementptr %struct.A_a4_a5, %struct.A_a4_a5* @a_s3_s4, i32 0, i32 0, i32 5
- %len1 = call i64 @strlen(i8* %p1)
- %pax0 = getelementptr inbounds [0 x i64], [0 x i64]* @ax, i64 0, i64 0
- store i64 %len1, i64* %pax0
+ %p1 = getelementptr %struct.A_a4_a5, ptr @a_s3_s4, i32 0, i32 0, i32 5
+ %len1 = call i64 @strlen(ptr %p1)
+ store i64 %len1, ptr @ax
- %p2 = getelementptr %struct.A_a4_a5, %struct.A_a4_a5* @a_s3_s4, i32 0, i32 1, i32 1
- %len2 = call i64 @strlen(i8* %p2)
- %pax1 = getelementptr inbounds [0 x i64], [0 x i64]* @ax, i64 0, i64 1
- store i64 %len1, i64* %pax1
+ %p2 = getelementptr %struct.A_a4_a5, ptr @a_s3_s4, i32 0, i32 1, i32 1
+ %len2 = call i64 @strlen(ptr %p2)
+ %pax1 = getelementptr inbounds [0 x i64], ptr @ax, i64 0, i64 1
+ store i64 %len1, ptr %pax1
ret void
}
define void @fold_strlen_a_s3_i32_S4_to_4() {
; CHECK-LABEL: @fold_strlen_a_s3_i32_S4_to_4(
-; CHECK-NEXT: store i64 4, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT: store i64 4, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT: store i64 4, ptr @ax, align 4
+; CHECK-NEXT: store i64 4, ptr getelementptr inbounds ([0 x i64], ptr @ax, i64 0, i64 1), align 4
; CHECK-NEXT: ret void
;
- %p1 = getelementptr %struct.A_a4_i32_a5, %struct.A_a4_i32_a5* @a_s3_i32_s4, i32 0, i32 0, i32 8
- %len1 = call i64 @strlen(i8* %p1)
- %pax0 = getelementptr inbounds [0 x i64], [0 x i64]* @ax, i64 0, i64 0
- store i64 %len1, i64* %pax0
+ %p1 = getelementptr %struct.A_a4_i32_a5, ptr @a_s3_i32_s4, i32 0, i32 0, i32 8
+ %len1 = call i64 @strlen(ptr %p1)
+ store i64 %len1, ptr @ax
- %p2 = getelementptr %struct.A_a4_i32_a5, %struct.A_a4_i32_a5* @a_s3_i32_s4, i32 0, i32 2, i32 0
- %len2 = call i64 @strlen(i8* %p2)
- %pax1 = getelementptr inbounds [0 x i64], [0 x i64]* @ax, i64 0, i64 1
- store i64 %len1, i64* %pax1
+ %p2 = getelementptr %struct.A_a4_i32_a5, ptr @a_s3_i32_s4, i32 0, i32 2, i32 0
+ %len2 = call i64 @strlen(ptr %p2)
+ %pax1 = getelementptr inbounds [0 x i64], ptr @ax, i64 0, i64 1
+ store i64 %len1, ptr %pax1
ret void
}
define void @fold_strlen_a_s3_i32_S4_p1_to_3() {
; CHECK-LABEL: @fold_strlen_a_s3_i32_S4_p1_to_3(
-; CHECK-NEXT: store i64 3, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT: store i64 3, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT: store i64 3, ptr @ax, align 4
+; CHECK-NEXT: store i64 3, ptr getelementptr inbounds ([0 x i64], ptr @ax, i64 0, i64 1), align 4
; CHECK-NEXT: ret void
;
- %p1 = getelementptr %struct.A_a4_i32_a5, %struct.A_a4_i32_a5* @a_s3_i32_s4, i32 0, i32 0, i32 9
- %len1 = call i64 @strlen(i8* %p1)
- %pax0 = getelementptr inbounds [0 x i64], [0 x i64]* @ax, i64 0, i64 0
- store i64 %len1, i64* %pax0
+ %p1 = getelementptr %struct.A_a4_i32_a5, ptr @a_s3_i32_s4, i32 0, i32 0, i32 9
+ %len1 = call i64 @strlen(ptr %p1)
+ store i64 %len1, ptr @ax
- %p2 = getelementptr %struct.A_a4_i32_a5, %struct.A_a4_i32_a5* @a_s3_i32_s4, i32 0, i32 2, i32 0
- %len2 = call i64 @strlen(i8* %p2)
- %pax1 = getelementptr inbounds [0 x i64], [0 x i64]* @ax, i64 0, i64 1
- store i64 %len1, i64* %pax1
+ %p2 = getelementptr %struct.A_a4_i32_a5, ptr @a_s3_i32_s4, i32 0, i32 2, i32 0
+ %len2 = call i64 @strlen(ptr %p2)
+ %pax1 = getelementptr inbounds [0 x i64], ptr @ax, i64 0, i64 1
+ store i64 %len1, ptr %pax1
ret void
}
define void @fold_strlen_a_s3_i32_S4_p2_to_2() {
; CHECK-LABEL: @fold_strlen_a_s3_i32_S4_p2_to_2(
-; CHECK-NEXT: store i64 2, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT: store i64 2, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT: store i64 2, ptr @ax, align 4
+; CHECK-NEXT: store i64 2, ptr getelementptr inbounds ([0 x i64], ptr @ax, i64 0, i64 1), align 4
; CHECK-NEXT: ret void
;
- %p1 = getelementptr %struct.A_a4_i32_a5, %struct.A_a4_i32_a5* @a_s3_i32_s4, i32 0, i32 0, i32 10
- %len1 = call i64 @strlen(i8* %p1)
- %pax0 = getelementptr inbounds [0 x i64], [0 x i64]* @ax, i64 0, i64 0
- store i64 %len1, i64* %pax0
+ %p1 = getelementptr %struct.A_a4_i32_a5, ptr @a_s3_i32_s4, i32 0, i32 0, i32 10
+ %len1 = call i64 @strlen(ptr %p1)
+ store i64 %len1, ptr @ax
- %p2 = getelementptr %struct.A_a4_i32_a5, %struct.A_a4_i32_a5* @a_s3_i32_s4, i32 0, i32 2, i32 2
- %len2 = call i64 @strlen(i8* %p2)
- %pax1 = getelementptr inbounds [0 x i64], [0 x i64]* @ax, i64 0, i64 1
- store i64 %len1, i64* %pax1
+ %p2 = getelementptr %struct.A_a4_i32_a5, ptr @a_s3_i32_s4, i32 0, i32 2, i32 2
+ %len2 = call i64 @strlen(ptr %p2)
+ %pax1 = getelementptr inbounds [0 x i64], ptr @ax, i64 0, i64 1
+ store i64 %len1, ptr %pax1
ret void
}
define void @fold_strlen_a_s3_i32_S4_p3_to_1() {
; CHECK-LABEL: @fold_strlen_a_s3_i32_S4_p3_to_1(
-; CHECK-NEXT: store i64 1, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT: store i64 1, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT: store i64 1, ptr @ax, align 4
+; CHECK-NEXT: store i64 1, ptr getelementptr inbounds ([0 x i64], ptr @ax, i64 0, i64 1), align 4
; CHECK-NEXT: ret void
;
- %p1 = getelementptr %struct.A_a4_i32_a5, %struct.A_a4_i32_a5* @a_s3_i32_s4, i32 0, i32 0, i32 11
- %len1 = call i64 @strlen(i8* %p1)
- %pax0 = getelementptr inbounds [0 x i64], [0 x i64]* @ax, i64 0, i64 0
- store i64 %len1, i64* %pax0
+ %p1 = getelementptr %struct.A_a4_i32_a5, ptr @a_s3_i32_s4, i32 0, i32 0, i32 11
+ %len1 = call i64 @strlen(ptr %p1)
+ store i64 %len1, ptr @ax
- %p2 = getelementptr %struct.A_a4_i32_a5, %struct.A_a4_i32_a5* @a_s3_i32_s4, i32 0, i32 2, i32 3
- %len2 = call i64 @strlen(i8* %p2)
- %pax1 = getelementptr inbounds [0 x i64], [0 x i64]* @ax, i64 0, i64 1
- store i64 %len1, i64* %pax1
+ %p2 = getelementptr %struct.A_a4_i32_a5, ptr @a_s3_i32_s4, i32 0, i32 2, i32 3
+ %len2 = call i64 @strlen(ptr %p2)
+ %pax1 = getelementptr inbounds [0 x i64], ptr @ax, i64 0, i64 1
+ store i64 %len1, ptr %pax1
ret void
}
define void @fold_strlen_a_s3_i32_S4_p4_to_0() {
; CHECK-LABEL: @fold_strlen_a_s3_i32_S4_p4_to_0(
-; CHECK-NEXT: store i64 0, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT: store i64 0, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT: store i64 0, ptr @ax, align 4
+; CHECK-NEXT: store i64 0, ptr getelementptr inbounds ([0 x i64], ptr @ax, i64 0, i64 1), align 4
; CHECK-NEXT: ret void
;
- %p1 = getelementptr %struct.A_a4_i32_a5, %struct.A_a4_i32_a5* @a_s3_i32_s4, i32 0, i32 0, i32 12
- %len1 = call i64 @strlen(i8* %p1)
- %pax0 = getelementptr inbounds [0 x i64], [0 x i64]* @ax, i64 0, i64 0
- store i64 %len1, i64* %pax0
+ %p1 = getelementptr %struct.A_a4_i32_a5, ptr @a_s3_i32_s4, i32 0, i32 0, i32 12
+ %len1 = call i64 @strlen(ptr %p1)
+ store i64 %len1, ptr @ax
- %p2 = getelementptr %struct.A_a4_i32_a5, %struct.A_a4_i32_a5* @a_s3_i32_s4, i32 0, i32 2, i32 4
- %len2 = call i64 @strlen(i8* %p2)
- %pax1 = getelementptr inbounds [0 x i64], [0 x i64]* @ax, i64 0, i64 1
- store i64 %len1, i64* %pax1
+ %p2 = getelementptr %struct.A_a4_i32_a5, ptr @a_s3_i32_s4, i32 0, i32 2, i32 4
+ %len2 = call i64 @strlen(ptr %p2)
+ %pax1 = getelementptr inbounds [0 x i64], ptr @ax, i64 0, i64 1
+ store i64 %len1, ptr %pax1
ret void
}
define void @fold_strlen_ax_s() {
; CHECK-LABEL: @fold_strlen_ax_s(
-; CHECK-NEXT: store i64 3, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT: store i64 5, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
-; CHECK-NEXT: store i64 7, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 2), align 4
+; CHECK-NEXT: store i64 3, ptr @ax, align 4
+; CHECK-NEXT: store i64 5, ptr getelementptr inbounds ([0 x i64], ptr @ax, i64 0, i64 1), align 4
+; CHECK-NEXT: store i64 7, ptr getelementptr inbounds ([0 x i64], ptr @ax, i64 0, i64 2), align 4
; CHECK-NEXT: ret void
;
- %pax_s3 = getelementptr { i8, [4 x i8] }, { i8, [4 x i8] }* @ax_s3, i64 0, i32 1, i64 0
- %len3 = call i64 @strlen(i8* %pax_s3)
- %pax1 = getelementptr inbounds [0 x i64], [0 x i64]* @ax, i64 0, i64 0
- store i64 %len3, i64* %pax1
-
- %pax_s5 = getelementptr { i16, [6 x i8] }, { i16, [6 x i8] }* @ax_s5, i64 0, i32 1, i64 0
- %len5 = call i64 @strlen(i8* %pax_s5)
- %pax2 = getelementptr inbounds [0 x i64], [0 x i64]* @ax, i64 0, i64 1
- store i64 %len5, i64* %pax2
-
- %pax_s7 = getelementptr { i32, i32, [8 x i8] }, { i32, i32, [8 x i8] }* @ax_s7, i64 0, i32 2, i64 0
- %len7 = call i64 @strlen(i8* %pax_s7)
- %pax3 = getelementptr inbounds [0 x i64], [0 x i64]* @ax, i64 0, i64 2
- store i64 %len7, i64* %pax3
+ %pax_s3 = getelementptr { i8, [4 x i8] }, ptr @ax_s3, i64 0, i32 1, i64 0
+ %len3 = call i64 @strlen(ptr %pax_s3)
+ store i64 %len3, ptr @ax
+
+ %pax_s5 = getelementptr { i16, [6 x i8] }, ptr @ax_s5, i64 0, i32 1, i64 0
+ %len5 = call i64 @strlen(ptr %pax_s5)
+ %pax2 = getelementptr inbounds [0 x i64], ptr @ax, i64 0, i64 1
+ store i64 %len5, ptr %pax2
+
+ %pax_s7 = getelementptr { i32, i32, [8 x i8] }, ptr @ax_s7, i64 0, i32 2, i64 0
+ %len7 = call i64 @strlen(ptr %pax_s7)
+ %pax3 = getelementptr inbounds [0 x i64], ptr @ax, i64 0, i64 2
+ store i64 %len7, ptr %pax3
ret void
}
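; These cases fold even though the strings sit behind other struct
; members: the array's offset within the struct is a compile-time
; constant, so the bytes at that offset are too. A hypothetical C
; analogue of the @ax_s5 case:
;
;   #include <string.h>
;   static const struct { short pad; char s[6]; } x = { 0, "12345" };
;   size_t n(void) { return strlen(x.s); }   /* folds to 5 */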
;
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-declare i64 @strlen(i8*)
+declare i64 @strlen(ptr)
%struct.A = type { [4 x i8], [5 x i8] }
; Fold strlen(a[I].a + J) and strlen(a[I].b + J) with constant I and J
; to constants.
-define void @fold_strlen_A(i64* %plen) {
+define void @fold_strlen_A(ptr %plen) {
; CHECK-LABEL: @fold_strlen_A(
-; CHECK-NEXT: store i64 1, i64* [[PLEN:%.*]], align 4
-; CHECK-NEXT: [[PLEN1:%.*]] = getelementptr i64, i64* [[PLEN]], i64 1
-; CHECK-NEXT: store i64 0, i64* [[PLEN1]], align 4
-; CHECK-NEXT: [[PLEN2:%.*]] = getelementptr i64, i64* [[PLEN]], i64 2
-; CHECK-NEXT: store i64 0, i64* [[PLEN2]], align 4
-; CHECK-NEXT: [[PLEN3:%.*]] = getelementptr i64, i64* [[PLEN]], i64 3
-; CHECK-NEXT: store i64 0, i64* [[PLEN3]], align 4
-; CHECK-NEXT: [[PLEN4:%.*]] = getelementptr i64, i64* [[PLEN]], i64 4
-; CHECK-NEXT: store i64 2, i64* [[PLEN4]], align 4
-; CHECK-NEXT: [[PLEN5:%.*]] = getelementptr i64, i64* [[PLEN]], i64 5
-; CHECK-NEXT: store i64 1, i64* [[PLEN5]], align 4
-; CHECK-NEXT: [[PLEN6:%.*]] = getelementptr i64, i64* [[PLEN]], i64 6
-; CHECK-NEXT: store i64 0, i64* [[PLEN6]], align 4
-; CHECK-NEXT: [[PLEN7:%.*]] = getelementptr i64, i64* [[PLEN]], i64 7
-; CHECK-NEXT: store i64 0, i64* [[PLEN7]], align 4
-; CHECK-NEXT: [[PLEN8:%.*]] = getelementptr i64, i64* [[PLEN]], i64 8
-; CHECK-NEXT: store i64 0, i64* [[PLEN8]], align 4
-; CHECK-NEXT: [[PLEN9:%.*]] = getelementptr i64, i64* [[PLEN]], i64 9
-; CHECK-NEXT: store i64 3, i64* [[PLEN9]], align 4
-; CHECK-NEXT: [[PLEN10:%.*]] = getelementptr i64, i64* [[PLEN]], i64 10
-; CHECK-NEXT: store i64 2, i64* [[PLEN10]], align 4
-; CHECK-NEXT: [[PLEN11:%.*]] = getelementptr i64, i64* [[PLEN]], i64 11
-; CHECK-NEXT: store i64 1, i64* [[PLEN11]], align 4
-; CHECK-NEXT: [[PLEN12:%.*]] = getelementptr i64, i64* [[PLEN]], i64 12
-; CHECK-NEXT: store i64 0, i64* [[PLEN12]], align 4
-; CHECK-NEXT: [[PLEN14:%.*]] = getelementptr i64, i64* [[PLEN]], i64 14
-; CHECK-NEXT: store i64 4, i64* [[PLEN14]], align 4
-; CHECK-NEXT: [[PLEN15:%.*]] = getelementptr i64, i64* [[PLEN]], i64 15
-; CHECK-NEXT: store i64 3, i64* [[PLEN15]], align 4
-; CHECK-NEXT: [[PLEN16:%.*]] = getelementptr i64, i64* [[PLEN]], i64 16
-; CHECK-NEXT: store i64 2, i64* [[PLEN16]], align 4
-; CHECK-NEXT: [[PLEN17:%.*]] = getelementptr i64, i64* [[PLEN]], i64 17
-; CHECK-NEXT: store i64 1, i64* [[PLEN17]], align 4
-; CHECK-NEXT: [[PLEN18:%.*]] = getelementptr i64, i64* [[PLEN]], i64 18
-; CHECK-NEXT: store i64 0, i64* [[PLEN18]], align 4
+; CHECK-NEXT: store i64 1, ptr [[PLEN:%.*]], align 4
+; CHECK-NEXT: [[PLEN1:%.*]] = getelementptr i64, ptr [[PLEN]], i64 1
+; CHECK-NEXT: store i64 0, ptr [[PLEN1]], align 4
+; CHECK-NEXT: [[PLEN2:%.*]] = getelementptr i64, ptr [[PLEN]], i64 2
+; CHECK-NEXT: store i64 0, ptr [[PLEN2]], align 4
+; CHECK-NEXT: [[PLEN3:%.*]] = getelementptr i64, ptr [[PLEN]], i64 3
+; CHECK-NEXT: store i64 0, ptr [[PLEN3]], align 4
+; CHECK-NEXT: [[PLEN4:%.*]] = getelementptr i64, ptr [[PLEN]], i64 4
+; CHECK-NEXT: store i64 2, ptr [[PLEN4]], align 4
+; CHECK-NEXT: [[PLEN5:%.*]] = getelementptr i64, ptr [[PLEN]], i64 5
+; CHECK-NEXT: store i64 1, ptr [[PLEN5]], align 4
+; CHECK-NEXT: [[PLEN6:%.*]] = getelementptr i64, ptr [[PLEN]], i64 6
+; CHECK-NEXT: store i64 0, ptr [[PLEN6]], align 4
+; CHECK-NEXT: [[PLEN7:%.*]] = getelementptr i64, ptr [[PLEN]], i64 7
+; CHECK-NEXT: store i64 0, ptr [[PLEN7]], align 4
+; CHECK-NEXT: [[PLEN8:%.*]] = getelementptr i64, ptr [[PLEN]], i64 8
+; CHECK-NEXT: store i64 0, ptr [[PLEN8]], align 4
+; CHECK-NEXT: [[PLEN9:%.*]] = getelementptr i64, ptr [[PLEN]], i64 9
+; CHECK-NEXT: store i64 3, ptr [[PLEN9]], align 4
+; CHECK-NEXT: [[PLEN10:%.*]] = getelementptr i64, ptr [[PLEN]], i64 10
+; CHECK-NEXT: store i64 2, ptr [[PLEN10]], align 4
+; CHECK-NEXT: [[PLEN11:%.*]] = getelementptr i64, ptr [[PLEN]], i64 11
+; CHECK-NEXT: store i64 1, ptr [[PLEN11]], align 4
+; CHECK-NEXT: [[PLEN12:%.*]] = getelementptr i64, ptr [[PLEN]], i64 12
+; CHECK-NEXT: store i64 0, ptr [[PLEN12]], align 4
+; CHECK-NEXT: [[PLEN14:%.*]] = getelementptr i64, ptr [[PLEN]], i64 14
+; CHECK-NEXT: store i64 4, ptr [[PLEN14]], align 4
+; CHECK-NEXT: [[PLEN15:%.*]] = getelementptr i64, ptr [[PLEN]], i64 15
+; CHECK-NEXT: store i64 3, ptr [[PLEN15]], align 4
+; CHECK-NEXT: [[PLEN16:%.*]] = getelementptr i64, ptr [[PLEN]], i64 16
+; CHECK-NEXT: store i64 2, ptr [[PLEN16]], align 4
+; CHECK-NEXT: [[PLEN17:%.*]] = getelementptr i64, ptr [[PLEN]], i64 17
+; CHECK-NEXT: store i64 1, ptr [[PLEN17]], align 4
+; CHECK-NEXT: [[PLEN18:%.*]] = getelementptr i64, ptr [[PLEN]], i64 18
+; CHECK-NEXT: store i64 0, ptr [[PLEN18]], align 4
; CHECK-NEXT: ret void
;
; Fold strlen(a[0].a) to 1.
- %pa0a = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 0
- %lena0a = call i64 @strlen(i8* %pa0a)
- %plen0 = getelementptr i64, i64* %plen, i32 0
- store i64 %lena0a, i64* %plen0
+ %lena0a = call i64 @strlen(ptr @a)
+ store i64 %lena0a, ptr %plen
; Fold strlen(a[0].a + 1) to 0.
- %pa0ap1 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 1
- %lena0ap1 = call i64 @strlen(i8* %pa0ap1)
- %plen1 = getelementptr i64, i64* %plen, i32 1
- store i64 %lena0ap1, i64* %plen1
+ %pa0ap1 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 0, i64 1
+ %lena0ap1 = call i64 @strlen(ptr %pa0ap1)
+ %plen1 = getelementptr i64, ptr %plen, i32 1
+ store i64 %lena0ap1, ptr %plen1
; Fold strlen(a[0].a + 2) to 0.
- %pa0ap2 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 2
- %lena0ap2 = call i64 @strlen(i8* %pa0ap2)
- %plen2 = getelementptr i64, i64* %plen, i32 2
- store i64 %lena0ap2, i64* %plen2
+ %pa0ap2 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 0, i64 2
+ %lena0ap2 = call i64 @strlen(ptr %pa0ap2)
+ %plen2 = getelementptr i64, ptr %plen, i32 2
+ store i64 %lena0ap2, ptr %plen2
; Fold strlen(a[0].a + 3) to 0.
- %pa0ap3 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 3
- %lena0ap3 = call i64 @strlen(i8* %pa0ap3)
- %plen3 = getelementptr i64, i64* %plen, i32 3
- store i64 %lena0ap3, i64* %plen3
+ %pa0ap3 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 0, i64 3
+ %lena0ap3 = call i64 @strlen(ptr %pa0ap3)
+ %plen3 = getelementptr i64, ptr %plen, i32 3
+ store i64 %lena0ap3, ptr %plen3
; Fold strlen(a[0].b) to 2.
- %pa0b = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 0
- %lena0b = call i64 @strlen(i8* %pa0b)
- %plen4 = getelementptr i64, i64* %plen, i32 4
- store i64 %lena0b, i64* %plen4
+ %pa0b = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 1, i64 0
+ %lena0b = call i64 @strlen(ptr %pa0b)
+ %plen4 = getelementptr i64, ptr %plen, i32 4
+ store i64 %lena0b, ptr %plen4
; Fold strlen(a[0].b + 1) to 1.
- %pa0bp1 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 1
- %lena0bp1 = call i64 @strlen(i8* %pa0bp1)
- %plen5 = getelementptr i64, i64* %plen, i32 5
- store i64 %lena0bp1, i64* %plen5
+ %pa0bp1 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 1, i64 1
+ %lena0bp1 = call i64 @strlen(ptr %pa0bp1)
+ %plen5 = getelementptr i64, ptr %plen, i32 5
+ store i64 %lena0bp1, ptr %plen5
; Fold strlen(a[0].b + 2) to 0.
- %pa0bp2 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 2
- %lena0bp2 = call i64 @strlen(i8* %pa0bp2)
- %plen6 = getelementptr i64, i64* %plen, i32 6
- store i64 %lena0bp2, i64* %plen6
+ %pa0bp2 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 1, i64 2
+ %lena0bp2 = call i64 @strlen(ptr %pa0bp2)
+ %plen6 = getelementptr i64, ptr %plen, i32 6
+ store i64 %lena0bp2, ptr %plen6
; Fold strlen(a[0].b + 3) to 0.
- %pa0bp3 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 3
- %lena0bp3 = call i64 @strlen(i8* %pa0bp3)
- %plen7 = getelementptr i64, i64* %plen, i32 7
- store i64 %lena0bp3, i64* %plen7
+ %pa0bp3 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 1, i64 3
+ %lena0bp3 = call i64 @strlen(ptr %pa0bp3)
+ %plen7 = getelementptr i64, ptr %plen, i32 7
+ store i64 %lena0bp3, ptr %plen7
; Fold strlen(a[0].b + 4) to 0.
- %pa0bp4 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 4
- %lena0bp4 = call i64 @strlen(i8* %pa0bp4)
- %plen8 = getelementptr i64, i64* %plen, i32 8
- store i64 %lena0bp4, i64* %plen8
+ %pa0bp4 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 1, i64 4
+ %lena0bp4 = call i64 @strlen(ptr %pa0bp4)
+ %plen8 = getelementptr i64, ptr %plen, i32 8
+ store i64 %lena0bp4, ptr %plen8
; Fold strlen(a[1].a) to 3.
- %pa1a = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 0
- %lena1a = call i64 @strlen(i8* %pa1a)
- %plen9 = getelementptr i64, i64* %plen, i32 9
- store i64 %lena1a, i64* %plen9
+ %pa1a = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 0, i64 0
+ %lena1a = call i64 @strlen(ptr %pa1a)
+ %plen9 = getelementptr i64, ptr %plen, i32 9
+ store i64 %lena1a, ptr %plen9
; Fold strlen(a[1].a + 1) to 2.
- %pa1ap1 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 1
- %lena1ap1 = call i64 @strlen(i8* %pa1ap1)
- %plen10 = getelementptr i64, i64* %plen, i32 10
- store i64 %lena1ap1, i64* %plen10
+ %pa1ap1 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 0, i64 1
+ %lena1ap1 = call i64 @strlen(ptr %pa1ap1)
+ %plen10 = getelementptr i64, ptr %plen, i32 10
+ store i64 %lena1ap1, ptr %plen10
; Fold strlen(a[1].a + 2) to 1.
- %pa1ap2 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 2
- %lena1ap2 = call i64 @strlen(i8* %pa1ap2)
- %plen11 = getelementptr i64, i64* %plen, i32 11
- store i64 %lena1ap2, i64* %plen11
+ %pa1ap2 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 0, i64 2
+ %lena1ap2 = call i64 @strlen(ptr %pa1ap2)
+ %plen11 = getelementptr i64, ptr %plen, i32 11
+ store i64 %lena1ap2, ptr %plen11
; Fold strlen(a[1].a + 3) to 0.
- %pa1ap3 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 3
- %lena1ap3 = call i64 @strlen(i8* %pa1ap3)
- %plen12 = getelementptr i64, i64* %plen, i32 12
- store i64 %lena1ap3, i64* %plen12
+ %pa1ap3 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 0, i64 3
+ %lena1ap3 = call i64 @strlen(ptr %pa1ap3)
+ %plen12 = getelementptr i64, ptr %plen, i32 12
+ store i64 %lena1ap3, ptr %plen12
; Fold strlen(a[1].b) to 4.
- %pa1b = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 0
- %lena1b = call i64 @strlen(i8* %pa1b)
- %plen14 = getelementptr i64, i64* %plen, i32 14
- store i64 %lena1b, i64* %plen14
+ %pa1b = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 1, i64 0
+ %lena1b = call i64 @strlen(ptr %pa1b)
+ %plen14 = getelementptr i64, ptr %plen, i32 14
+ store i64 %lena1b, ptr %plen14
; Fold strlen(a[1].b + 1) to 3.
- %pa1bp1 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 1
- %lena1bp1 = call i64 @strlen(i8* %pa1bp1)
- %plen15 = getelementptr i64, i64* %plen, i32 15
- store i64 %lena1bp1, i64* %plen15
+ %pa1bp1 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 1, i64 1
+ %lena1bp1 = call i64 @strlen(ptr %pa1bp1)
+ %plen15 = getelementptr i64, ptr %plen, i32 15
+ store i64 %lena1bp1, ptr %plen15
; Fold strlen(a[1].b + 2) to 2.
- %pa1bp2 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 2
- %lena1bp2 = call i64 @strlen(i8* %pa1bp2)
- %plen16 = getelementptr i64, i64* %plen, i32 16
- store i64 %lena1bp2, i64* %plen16
+ %pa1bp2 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 1, i64 2
+ %lena1bp2 = call i64 @strlen(ptr %pa1bp2)
+ %plen16 = getelementptr i64, ptr %plen, i32 16
+ store i64 %lena1bp2, ptr %plen16
; Fold strlen(a[1].b + 3) to 1.
- %pa1bp3 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 3
- %lena1bp3 = call i64 @strlen(i8* %pa1bp3)
- %plen17 = getelementptr i64, i64* %plen, i32 17
- store i64 %lena1bp3, i64* %plen17
+ %pa1bp3 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 1, i64 3
+ %lena1bp3 = call i64 @strlen(ptr %pa1bp3)
+ %plen17 = getelementptr i64, ptr %plen, i32 17
+ store i64 %lena1bp3, ptr %plen17
; Fold strlen(a[1].b + 4) to 0.
- %pa1bp4 = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 4
- %lena1bp4 = call i64 @strlen(i8* %pa1bp4)
- %plen18 = getelementptr i64, i64* %plen, i32 18
- store i64 %lena1bp4, i64* %plen18
+ %pa1bp4 = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 1, i64 4
+ %lena1bp4 = call i64 @strlen(ptr %pa1bp4)
+ %plen18 = getelementptr i64, ptr %plen, i32 18
+ store i64 %lena1bp4, ptr %plen18
ret void
}
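; A minimal before/after sketch of the folds above, assuming a[0].a
; holds "1" (the CHECK lines imply string lengths 1, 2, 3, and 4 for
; a[0].a, a[0].b, a[1].a, and a[1].b respectively):
;   %lena0a = call i64 @strlen(ptr @a)
;   store i64 %lena0a, ptr %plen
; folds to
;   store i64 1, ptr %plen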
; TODO: Fold strlen(a[I].a + X) and strlen(a[I].b + X) with constant I and
; variable X to (strlen(a[I].a) - X) and (strlen(a[I].b) - X) respectively.
-define void @fold_strlen_A_pI(i64* %plen, i64 %I) {
+define void @fold_strlen_A_pI(ptr %plen, i64 %I) {
; CHECK-LABEL: @fold_strlen_A_pI(
-; CHECK-NEXT: [[PA0A:%.*]] = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 [[I:%.*]]
-; CHECK-NEXT: [[LENA0A:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) [[PA0A]])
-; CHECK-NEXT: store i64 [[LENA0A]], i64* [[PLEN:%.*]], align 4
-; CHECK-NEXT: [[PA0B:%.*]] = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 [[I]]
-; CHECK-NEXT: [[LENA0B:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) [[PA0B]])
-; CHECK-NEXT: [[PLEN1:%.*]] = getelementptr i64, i64* [[PLEN]], i64 1
-; CHECK-NEXT: store i64 [[LENA0B]], i64* [[PLEN1]], align 4
-; CHECK-NEXT: [[PA1A:%.*]] = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 [[I]]
-; CHECK-NEXT: [[LENA1A:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) [[PA1A]])
-; CHECK-NEXT: [[PLEN2:%.*]] = getelementptr i64, i64* [[PLEN]], i64 2
-; CHECK-NEXT: store i64 [[LENA1A]], i64* [[PLEN2]], align 4
-; CHECK-NEXT: [[PA1B:%.*]] = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 [[I]]
-; CHECK-NEXT: [[LENA1B:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) [[PA1B]])
-; CHECK-NEXT: [[PLEN3:%.*]] = getelementptr i64, i64* [[PLEN]], i64 3
-; CHECK-NEXT: store i64 [[LENA1B]], i64* [[PLEN3]], align 4
+; CHECK-NEXT: [[PA0A:%.*]] = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 0, i64 [[I:%.*]]
+; CHECK-NEXT: [[LENA0A:%.*]] = call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[PA0A]])
+; CHECK-NEXT: store i64 [[LENA0A]], ptr [[PLEN:%.*]], align 4
+; CHECK-NEXT: [[PA0B:%.*]] = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 1, i64 [[I]]
+; CHECK-NEXT: [[LENA0B:%.*]] = call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[PA0B]])
+; CHECK-NEXT: [[PLEN1:%.*]] = getelementptr i64, ptr [[PLEN]], i64 1
+; CHECK-NEXT: store i64 [[LENA0B]], ptr [[PLEN1]], align 4
+; CHECK-NEXT: [[PA1A:%.*]] = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 0, i64 [[I]]
+; CHECK-NEXT: [[LENA1A:%.*]] = call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[PA1A]])
+; CHECK-NEXT: [[PLEN2:%.*]] = getelementptr i64, ptr [[PLEN]], i64 2
+; CHECK-NEXT: store i64 [[LENA1A]], ptr [[PLEN2]], align 4
+; CHECK-NEXT: [[PA1B:%.*]] = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 1, i64 [[I]]
+; CHECK-NEXT: [[LENA1B:%.*]] = call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[PA1B]])
+; CHECK-NEXT: [[PLEN3:%.*]] = getelementptr i64, ptr [[PLEN]], i64 3
+; CHECK-NEXT: store i64 [[LENA1B]], ptr [[PLEN3]], align 4
; CHECK-NEXT: ret void
;
- %pa0a = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 %I
- %lena0a = call i64 @strlen(i8* %pa0a)
- %plen0 = getelementptr i64, i64* %plen, i32 0
- store i64 %lena0a, i64* %plen0
-
- %pa0b = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 %I
- %lena0b = call i64 @strlen(i8* %pa0b)
- %plen1 = getelementptr i64, i64* %plen, i32 1
- store i64 %lena0b, i64* %plen1
-
- %pa1a = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 %I
- %lena1a = call i64 @strlen(i8* %pa1a)
- %plen2 = getelementptr i64, i64* %plen, i32 2
- store i64 %lena1a, i64* %plen2
-
- %pa1b = getelementptr [2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 %I
- %lena1b = call i64 @strlen(i8* %pa1b)
- %plen3 = getelementptr i64, i64* %plen, i32 3
- store i64 %lena1b, i64* %plen3
+ %pa0a = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 0, i64 %I
+ %lena0a = call i64 @strlen(ptr %pa0a)
+ store i64 %lena0a, ptr %plen
+
+ %pa0b = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 0, i32 1, i64 %I
+ %lena0b = call i64 @strlen(ptr %pa0b)
+ %plen1 = getelementptr i64, ptr %plen, i32 1
+ store i64 %lena0b, ptr %plen1
+
+ %pa1a = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 0, i64 %I
+ %lena1a = call i64 @strlen(ptr %pa1a)
+ %plen2 = getelementptr i64, ptr %plen, i32 2
+ store i64 %lena1a, ptr %plen2
+
+ %pa1b = getelementptr [2 x %struct.A], ptr @a, i64 0, i64 1, i32 1, i64 %I
+ %lena1b = call i64 @strlen(ptr %pa1b)
+ %plen3 = getelementptr i64, ptr %plen, i32 3
+ store i64 %lena1b, ptr %plen3
ret void
}
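; With a variable offset %I the constant strings in @a cannot be read,
; so the strlen calls above survive; instcombine still annotates each
; pointer argument, as in the CHECK lines:
;   call i64 @strlen(ptr noundef nonnull dereferenceable(1) %pa0a)
; because strlen is always entitled to read at least one byte.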
;
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-declare i64 @strlen(i8*)
+declare i64 @strlen(ptr)
@a5_4 = constant [5 x [4 x i8]] [[4 x i8] c"123\00", [4 x i8] c"12\00\00", [4 x i8] c"1\00\00\00", [4 x i8] zeroinitializer, [4 x i8] zeroinitializer]
define i64 @fold_a5_4_i0_pI(i64 %I) {
; CHECK-LABEL: @fold_a5_4_i0_pI(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 0, i64 [[I:%.*]]
-; CHECK-NEXT: [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) [[PTR]])
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 0, i64 [[I:%.*]]
+; CHECK-NEXT: [[LEN:%.*]] = call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[PTR]])
; CHECK-NEXT: ret i64 [[LEN]]
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 0, i64 %I
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 0, i64 %I
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
define i64 @fold_a5_4_i1_pI(i64 %I) {
; CHECK-LABEL: @fold_a5_4_i1_pI(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 1, i64 [[I:%.*]]
-; CHECK-NEXT: [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) [[PTR]])
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 1, i64 [[I:%.*]]
+; CHECK-NEXT: [[LEN:%.*]] = call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[PTR]])
; CHECK-NEXT: ret i64 [[LEN]]
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 1, i64 %I
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 1, i64 %I
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
define i64 @fold_a5_4_i2_pI(i64 %I) {
; CHECK-LABEL: @fold_a5_4_i2_pI(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 [[I:%.*]]
-; CHECK-NEXT: [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) [[PTR]])
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 2, i64 [[I:%.*]]
+; CHECK-NEXT: [[LEN:%.*]] = call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[PTR]])
; CHECK-NEXT: ret i64 [[LEN]]
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 %I
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 2, i64 %I
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
define i64 @fold_a5_4_i3_pI_to_0(i64 %I) {
; CHECK-LABEL: @fold_a5_4_i3_pI_to_0(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 3, i64 [[I:%.*]]
-; CHECK-NEXT: [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) [[PTR]])
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 3, i64 [[I:%.*]]
+; CHECK-NEXT: [[LEN:%.*]] = call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[PTR]])
; CHECK-NEXT: ret i64 [[LEN]]
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 3, i64 %I
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 3, i64 %I
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
define i64 @fold_a5_4_i4_pI_to_0(i64 %I) {
; CHECK-LABEL: @fold_a5_4_i4_pI_to_0(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 4, i64 [[I:%.*]]
-; CHECK-NEXT: [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) [[PTR]])
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 4, i64 [[I:%.*]]
+; CHECK-NEXT: [[LEN:%.*]] = call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[PTR]])
; CHECK-NEXT: ret i64 [[LEN]]
;
- %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 4, i64 %I
- %len = call i64 @strlen(i8* %ptr)
+ %ptr = getelementptr [5 x [4 x i8]], ptr @a5_4, i64 0, i64 4, i64 %I
+ %len = call i64 @strlen(ptr %ptr)
ret i64 %len
}
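; None of the @a5_4 cases above fold, for the same reason as the TODO
; earlier: the variable innermost index %I defeats
; getConstantDataArrayInfo, so only the strlen argument attributes
; (noundef nonnull dereferenceable(1)) are added.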
;
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-declare i64 @strlen(i8*)
+declare i64 @strlen(ptr)
@a5 = constant [5 x i8] c"12345"
@s5 = constant [6 x i8] c"12345\00"
; Exercise folding of strlen calls whose constant array arguments lack
; a terminating nul.  The folds are preferred to
; making the library calls even though it prevents sanitizers from reporting
; the bugs.
-define void @fold_strlen_no_nul(i64* %plen, i32 %i) {
+define void @fold_strlen_no_nul(ptr %plen, i32 %i) {
; CHECK-LABEL: @fold_strlen_no_nul(
-; CHECK-NEXT: store i64 5, i64* [[PLEN:%.*]], align 4
-; CHECK-NEXT: [[PNA5_P5:%.*]] = getelementptr i64, i64* [[PLEN]], i64 1
-; CHECK-NEXT: store i64 0, i64* [[PNA5_P5]], align 4
-; CHECK-NEXT: [[PNS5_P6:%.*]] = getelementptr i64, i64* [[PLEN]], i64 2
-; CHECK-NEXT: store i64 0, i64* [[PNS5_P6]], align 4
+; CHECK-NEXT: store i64 5, ptr [[PLEN:%.*]], align 4
+; CHECK-NEXT: [[PNA5_P5:%.*]] = getelementptr i64, ptr [[PLEN]], i64 1
+; CHECK-NEXT: store i64 0, ptr [[PNA5_P5]], align 4
+; CHECK-NEXT: [[PNS5_P6:%.*]] = getelementptr i64, ptr [[PLEN]], i64 2
+; CHECK-NEXT: store i64 0, ptr [[PNS5_P6]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[I:%.*]] to i64
-; CHECK-NEXT: [[PA5_PI:%.*]] = getelementptr [5 x i8], [5 x i8]* @a5, i64 0, i64 [[TMP1]]
-; CHECK-NEXT: [[NA5_PI:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) [[PA5_PI]])
-; CHECK-NEXT: [[PNA5_PI:%.*]] = getelementptr i64, i64* [[PLEN]], i64 3
-; CHECK-NEXT: store i64 [[NA5_PI]], i64* [[PNA5_PI]], align 4
-; CHECK-NEXT: [[PNZ0_P0:%.*]] = getelementptr i64, i64* [[PLEN]], i64 4
-; CHECK-NEXT: store i64 0, i64* [[PNZ0_P0]], align 4
+; CHECK-NEXT: [[PA5_PI:%.*]] = getelementptr [5 x i8], ptr @a5, i64 0, i64 [[TMP1]]
+; CHECK-NEXT: [[NA5_PI:%.*]] = call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[PA5_PI]])
+; CHECK-NEXT: [[PNA5_PI:%.*]] = getelementptr i64, ptr [[PLEN]], i64 3
+; CHECK-NEXT: store i64 [[NA5_PI]], ptr [[PNA5_PI]], align 4
+; CHECK-NEXT: [[PNZ0_P0:%.*]] = getelementptr i64, ptr [[PLEN]], i64 4
+; CHECK-NEXT: store i64 0, ptr [[PNZ0_P0]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = sext i32 [[I]] to i64
-; CHECK-NEXT: [[PZ0_PI:%.*]] = getelementptr [0 x i8], [0 x i8]* @z0, i64 0, i64 [[TMP2]]
-; CHECK-NEXT: [[NZ0_PI:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) [[PZ0_PI]])
-; CHECK-NEXT: [[PNZ0_PI:%.*]] = getelementptr i64, i64* [[PLEN]], i64 5
-; CHECK-NEXT: store i64 [[NZ0_PI]], i64* [[PNZ0_PI]], align 4
-; CHECK-NEXT: [[PNZ5_P5:%.*]] = getelementptr i64, i64* [[PLEN]], i64 6
-; CHECK-NEXT: store i64 0, i64* [[PNZ5_P5]], align 4
+; CHECK-NEXT: [[PZ0_PI:%.*]] = getelementptr [0 x i8], ptr @z0, i64 0, i64 [[TMP2]]
+; CHECK-NEXT: [[NZ0_PI:%.*]] = call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[PZ0_PI]])
+; CHECK-NEXT: [[PNZ0_PI:%.*]] = getelementptr i64, ptr [[PLEN]], i64 5
+; CHECK-NEXT: store i64 [[NZ0_PI]], ptr [[PNZ0_PI]], align 4
+; CHECK-NEXT: [[PNZ5_P5:%.*]] = getelementptr i64, ptr [[PLEN]], i64 6
+; CHECK-NEXT: store i64 0, ptr [[PNZ5_P5]], align 4
; CHECK-NEXT: ret void
;
; Verify that strlen(a5) is folded to 5.
- %pa0_p0 = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 0
- %na5_p0 = call i64 @strlen(i8* %pa0_p0)
- %pna5_p0 = getelementptr i64, i64* %plen, i64 0
- store i64 %na5_p0, i64* %pna5_p0
+ %na5_p0 = call i64 @strlen(ptr @a5)
+ store i64 %na5_p0, ptr %plen
; Verify that strlen(a5 + 5) is folded to 0.
- %pa5_p5 = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 5
- %na5_p5 = call i64 @strlen(i8* %pa5_p5)
- %pna5_p5 = getelementptr i64, i64* %plen, i64 1
- store i64 %na5_p5, i64* %pna5_p5
+ %pa5_p5 = getelementptr [5 x i8], ptr @a5, i32 0, i32 5
+ %na5_p5 = call i64 @strlen(ptr %pa5_p5)
+ %pna5_p5 = getelementptr i64, ptr %plen, i64 1
+ store i64 %na5_p5, ptr %pna5_p5
; Verify that strlen(s5 + 6) is folded to 0.
- %ps5_p6 = getelementptr [6 x i8], [6 x i8]* @s5, i32 0, i32 6
- %ns5_p6 = call i64 @strlen(i8* %ps5_p6)
- %pns5_p6 = getelementptr i64, i64* %plen, i64 2
- store i64 %ns5_p6, i64* %pns5_p6
+ %ps5_p6 = getelementptr [6 x i8], ptr @s5, i32 0, i32 6
+ %ns5_p6 = call i64 @strlen(ptr %ps5_p6)
+ %pns5_p6 = getelementptr i64, ptr %plen, i64 2
+ store i64 %ns5_p6, ptr %pns5_p6
; TODO: Should strlen(a5 + i) be folded to 5 - i?  It's currently
; not folded because the variable offset makes getConstantDataArrayInfo
; fail.
- %pa5_pi = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 %i
- %na5_pi = call i64 @strlen(i8* %pa5_pi)
- %pna5_pi = getelementptr i64, i64* %plen, i64 3
- store i64 %na5_pi, i64* %pna5_pi
+ %pa5_pi = getelementptr [5 x i8], ptr @a5, i32 0, i32 %i
+ %na5_pi = call i64 @strlen(ptr %pa5_pi)
+ %pna5_pi = getelementptr i64, ptr %plen, i64 3
+ store i64 %na5_pi, ptr %pna5_pi
; Verify that strlen(z0) is folded to 0.
- %pz0_p0 = getelementptr [0 x i8], [0 x i8]* @z0, i32 0, i32 0
- %nz0_p0 = call i64 @strlen(i8* %pz0_p0)
- %pnz0_p0 = getelementptr i64, i64* %plen, i64 4
- store i64 %nz0_p0, i64* %pnz0_p0
+ %nz0_p0 = call i64 @strlen(ptr @z0)
+ %pnz0_p0 = getelementptr i64, ptr %plen, i64 4
+ store i64 %nz0_p0, ptr %pnz0_p0
; TODO: Verify that strlen(z0 + i) is folded to 0. Like the case above,
; this one is not folded either because the variable offset makes
; getConstantDataArrayInfo fail.
- %pz0_pi = getelementptr [0 x i8], [0 x i8]* @z0, i32 0, i32 %i
- %nz0_pi = call i64 @strlen(i8* %pz0_pi)
- %pnz0_pi = getelementptr i64, i64* %plen, i64 5
- store i64 %nz0_pi, i64* %pnz0_pi
+ %pz0_pi = getelementptr [0 x i8], ptr @z0, i32 0, i32 %i
+ %nz0_pi = call i64 @strlen(ptr %pz0_pi)
+ %pnz0_pi = getelementptr i64, ptr %plen, i64 5
+ store i64 %nz0_pi, ptr %pnz0_pi
; Verify that strlen(z5 + 5) is folded to 0.
- %pz5_p5 = getelementptr [5 x i8], [5 x i8]* @z5, i32 0, i32 5
- %nz5_p5 = call i64 @strlen(i8* %pz5_p5)
- %pnz5_p5 = getelementptr i64, i64* %plen, i64 6
- store i64 %nz5_p5, i64* %pnz5_p5
+ %pz5_p5 = getelementptr [5 x i8], ptr @z5, i32 0, i32 5
+ %nz5_p5 = call i64 @strlen(ptr %pz5_p5)
+ %pnz5_p5 = getelementptr i64, ptr %plen, i64 6
+ store i64 %nz5_p5, ptr %pnz5_p5
ret void
}
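; The key point of the no-nul cases above: @a5 = c"12345" has no
; terminating nul, yet strlen(@a5) is still folded, to 5, the full size
; of the [5 x i8] array; e.g.:
;   %na5_p0 = call i64 @strlen(ptr @a5)
;   store i64 %na5_p0, ptr %plen
; becomes
;   store i64 5, ptr %plen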
@hello = constant [6 x i8] c"hello\00"
@hello_no_nul = constant [5 x i8] c"hello"
-declare i32 @__strlen_chk(i8*, i32)
+declare i32 @__strlen_chk(ptr, i32)
; Check __strlen_chk(string constant) -> strlen or constants
; CHECK-LABEL: @unknown_str_known_object_size
-define i32 @unknown_str_known_object_size(i8* %c) {
+define i32 @unknown_str_known_object_size(ptr %c) {
; CHECK: call i32 @__strlen_chk
- %1 = call i32 @__strlen_chk(i8* %c, i32 8)
+ %1 = call i32 @__strlen_chk(ptr %c, i32 8)
ret i32 %1
}
; CHECK-LABEL: @known_str_known_object_size
-define i32 @known_str_known_object_size(i8* %c) {
+define i32 @known_str_known_object_size(ptr %c) {
; CHECK: ret i32 5
- %1 = call i32 @__strlen_chk(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @hello, i32 0, i32 0), i32 6)
+ %1 = call i32 @__strlen_chk(ptr @hello, i32 6)
ret i32 %1
}
; CHECK-LABEL: @known_str_too_small_object_size
-define i32 @known_str_too_small_object_size(i8* %c) {
+define i32 @known_str_too_small_object_size(ptr %c) {
; CHECK: call i32 @__strlen_chk
- %1 = call i32 @__strlen_chk(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @hello, i32 0, i32 0), i32 5)
+ %1 = call i32 @__strlen_chk(ptr @hello, i32 5)
ret i32 %1
}
; CHECK-LABEL: @known_str_no_nul
-define i32 @known_str_no_nul(i8* %c) {
+define i32 @known_str_no_nul(ptr %c) {
; CHECK: call i32 @__strlen_chk
- %1 = call i32 @__strlen_chk(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @hello_no_nul, i32 0, i32 0), i32 5)
+ %1 = call i32 @__strlen_chk(ptr @hello_no_nul, i32 5)
ret i32 %1
}
; CHECK-LABEL: @unknown_str_unknown_object_size
-define i32 @unknown_str_unknown_object_size(i8* %c) {
+define i32 @unknown_str_unknown_object_size(ptr %c) {
; CHECK: call i32 @strlen
- %1 = call i32 @__strlen_chk(i8* %c, i32 -1)
+ %1 = call i32 @__strlen_chk(ptr %c, i32 -1)
ret i32 %1
}
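; Summary of the __strlen_chk folds above: the call folds to a constant
; only when the string is a known constant and the object-size bound
; covers it including the nul (e.g. @__strlen_chk(ptr @hello, i32 6)
; -> 5); a bound of -1 means the size is unknown and the call decays to
; plain strlen; everything else is left alone.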
@null = constant [1 x i8] zeroinitializer
@null_hello = constant [7 x i8] c"\00hello\00"
-declare i8* @strncat(i8*, i8*, i32)
-declare i32 @puts(i8*)
+declare ptr @strncat(ptr, ptr, i32)
+declare i32 @puts(ptr)
define i32 @main() {
; CHECK-LABEL: @main(
-; CHECK-NOT: call i8* @strncat
+; CHECK-NOT: call ptr @strncat
; CHECK: call i32 @puts
%target = alloca [1024 x i8]
- %arg1 = getelementptr [1024 x i8], [1024 x i8]* %target, i32 0, i32 0
- store i8 0, i8* %arg1
+ store i8 0, ptr %target
; rslt1 = strncat(target, "hello\00", 6)
- %arg2 = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
- %rslt1 = call i8* @strncat(i8* %arg1, i8* %arg2, i32 6)
+ %rslt1 = call ptr @strncat(ptr %target, ptr @hello, i32 6)
; rslt2 = strncat(rslt1, "\00", 42)
- %arg3 = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
- %rslt2 = call i8* @strncat(i8* %rslt1, i8* %arg3, i32 42)
+ %rslt2 = call ptr @strncat(ptr %rslt1, ptr @null, i32 42)
; rslt3 = strncat(rslt2, "\00hello\00", 42)
- %arg4 = getelementptr [7 x i8], [7 x i8]* @null_hello, i32 0, i32 0
- %rslt3 = call i8* @strncat(i8* %rslt2, i8* %arg4, i32 42)
+ %rslt3 = call ptr @strncat(ptr %rslt2, ptr @null_hello, i32 42)
- call i32 @puts(i8* %rslt3)
+ call i32 @puts(ptr %rslt3)
ret i32 0
}
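; In the test above every strncat bound is large enough for its source
; string, so no strncat call remains after instcombine (the copies are
; presumably lowered to memcpy-style code) and only the final
;   call i32 @puts(ptr %rslt3)
; is checked for.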
@bell = constant [5 x i8] c"bell\00"
@null = constant [1 x i8] zeroinitializer
-declare i32 @strncmp(i8*, i8*, i32)
+declare i32 @strncmp(ptr, ptr, i32)
; strncmp("", x, n) -> -*x
-define i32 @test1(i8* %str2) {
+define i32 @test1(ptr %str2) {
; CHECK-LABEL: @test1(
-; CHECK-NEXT: [[STRCMPLOAD:%.*]] = load i8, i8* [[STR2:%.*]], align 1
+; CHECK-NEXT: [[STRCMPLOAD:%.*]] = load i8, ptr [[STR2:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[STRCMPLOAD]] to i32
; CHECK-NEXT: [[TMP2:%.*]] = sub nsw i32 0, [[TMP1]]
; CHECK-NEXT: ret i32 [[TMP2]]
;
- %str1 = getelementptr inbounds [1 x i8], [1 x i8]* @null, i32 0, i32 0
- %temp1 = call i32 @strncmp(i8* %str1, i8* %str2, i32 10)
+ %temp1 = call i32 @strncmp(ptr @null, ptr %str2, i32 10)
ret i32 %temp1
}
; strncmp(x, "", n) -> *x
-define i32 @test2(i8* %str1) {
+define i32 @test2(ptr %str1) {
; CHECK-LABEL: @test2(
-; CHECK-NEXT: [[STRCMPLOAD:%.*]] = load i8, i8* [[STR1:%.*]], align 1
+; CHECK-NEXT: [[STRCMPLOAD:%.*]] = load i8, ptr [[STR1:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[STRCMPLOAD]] to i32
; CHECK-NEXT: ret i32 [[TMP1]]
;
- %str2 = getelementptr inbounds [1 x i8], [1 x i8]* @null, i32 0, i32 0
- %temp1 = call i32 @strncmp(i8* %str1, i8* %str2, i32 10)
+ %temp1 = call i32 @strncmp(ptr %str1, ptr @null, i32 10)
ret i32 %temp1
}
; CHECK-NEXT: ret i32 -1
;
- %str1 = getelementptr inbounds [5 x i8], [5 x i8]* @hell, i32 0, i32 0
- %str2 = getelementptr inbounds [6 x i8], [6 x i8]* @hello, i32 0, i32 0
- %temp1 = call i32 @strncmp(i8* %str1, i8* %str2, i32 10)
+ %temp1 = call i32 @strncmp(ptr @hell, ptr @hello, i32 10)
ret i32 %temp1
}
; CHECK-NEXT: ret i32 1
;
- %str1 = getelementptr inbounds [5 x i8], [5 x i8]* @hell, i32 0, i32 0
- %str2 = getelementptr inbounds [1 x i8], [1 x i8]* @null, i32 0, i32 0
- %temp1 = call i32 @strncmp(i8* %str1, i8* %str2, i32 10)
+ %temp1 = call i32 @strncmp(ptr @hell, ptr @null, i32 10)
ret i32 %temp1
}
; CHECK-NEXT: ret i32 0
;
- %str1 = getelementptr inbounds [5 x i8], [5 x i8]* @hell, i32 0, i32 0
- %str2 = getelementptr inbounds [6 x i8], [6 x i8]* @hello, i32 0, i32 0
- %temp1 = call i32 @strncmp(i8* %str1, i8* %str2, i32 4)
+ %temp1 = call i32 @strncmp(ptr @hell, ptr @hello, i32 4)
ret i32 %temp1
}
; strncmp(x,y,1) -> memcmp(x,y,1)
-define i32 @test6(i8* %str1, i8* %str2) {
+define i32 @test6(ptr %str1, ptr %str2) {
; CHECK-LABEL: @test6(
-; CHECK-NEXT: [[LHSC:%.*]] = load i8, i8* [[STR1:%.*]], align 1
+; CHECK-NEXT: [[LHSC:%.*]] = load i8, ptr [[STR1:%.*]], align 1
; CHECK-NEXT: [[LHSV:%.*]] = zext i8 [[LHSC]] to i32
-; CHECK-NEXT: [[RHSC:%.*]] = load i8, i8* [[STR2:%.*]], align 1
+; CHECK-NEXT: [[RHSC:%.*]] = load i8, ptr [[STR2:%.*]], align 1
; CHECK-NEXT: [[RHSV:%.*]] = zext i8 [[RHSC]] to i32
; CHECK-NEXT: [[CHARDIFF:%.*]] = sub nsw i32 [[LHSV]], [[RHSV]]
; CHECK-NEXT: ret i32 [[CHARDIFF]]
;
- %temp1 = call i32 @strncmp(i8* %str1, i8* %str2, i32 1)
+ %temp1 = call i32 @strncmp(ptr %str1, ptr %str2, i32 1)
ret i32 %temp1
}
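; A sketch of the n == 1 expansion checked above: strncmp compares the
; first bytes as unsigned char, so the call becomes straight-line IR:
;   %lhsc = load i8, ptr %str1
;   %lhsv = zext i8 %lhsc to i32
;   %rhsc = load i8, ptr %str2
;   %rhsv = zext i8 %rhsc to i32
;   %chardiff = sub nsw i32 %lhsv, %rhsv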
; strncmp(x,y,0) -> 0
-define i32 @test7(i8* %str1, i8* %str2) {
+define i32 @test7(ptr %str1, ptr %str2) {
; CHECK-LABEL: @test7(
; CHECK-NEXT: ret i32 0
;
- %temp1 = call i32 @strncmp(i8* %str1, i8* %str2, i32 0)
+ %temp1 = call i32 @strncmp(ptr %str1, ptr %str2, i32 0)
ret i32 %temp1
}
; strncmp(x,x,n) -> 0
-define i32 @test8(i8* %str, i32 %n) {
+define i32 @test8(ptr %str, i32 %n) {
; CHECK-LABEL: @test8(
; CHECK-NEXT: ret i32 0
;
- %temp1 = call i32 @strncmp(i8* %str, i8* %str, i32 %n)
+ %temp1 = call i32 @strncmp(ptr %str, ptr %str, i32 %n)
ret i32 %temp1
}
; strncmp(nonnull x, nonnull y, n) -> strncmp(x, y, n)
-define i32 @test9(i8* %str1, i8* %str2, i32 %n) {
+define i32 @test9(ptr %str1, ptr %str2, i32 %n) {
; CHECK-LABEL: @test9(
-; CHECK-NEXT: [[TEMP1:%.*]] = call i32 @strncmp(i8* nonnull [[STR1:%.*]], i8* nonnull [[STR2:%.*]], i32 [[N:%.*]])
+; CHECK-NEXT: [[TEMP1:%.*]] = call i32 @strncmp(ptr nonnull [[STR1:%.*]], ptr nonnull [[STR2:%.*]], i32 [[N:%.*]])
; CHECK-NEXT: ret i32 [[TEMP1]]
;
- %temp1 = call i32 @strncmp(i8* nonnull %str1, i8* nonnull %str2, i32 %n)
+ %temp1 = call i32 @strncmp(ptr nonnull %str1, ptr nonnull %str2, i32 %n)
ret i32 %temp1
}
; strncmp(nonnull x, nonnull y, 0) -> 0
-define i32 @test10(i8* %str1, i8* %str2, i32 %n) {
+define i32 @test10(ptr %str1, ptr %str2, i32 %n) {
; CHECK-LABEL: @test10(
; CHECK-NEXT: ret i32 0
;
- %temp1 = call i32 @strncmp(i8* nonnull %str1, i8* nonnull %str2, i32 0)
+ %temp1 = call i32 @strncmp(ptr nonnull %str1, ptr nonnull %str2, i32 0)
ret i32 %temp1
}
; strncmp(x, y, 5) -> strncmp(nonnull x, nonnull y, 5)
-define i32 @test11(i8* %str1, i8* %str2, i32 %n) {
+define i32 @test11(ptr %str1, ptr %str2, i32 %n) {
; CHECK-LABEL: @test11(
-; CHECK-NEXT: [[TEMP1:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) [[STR1:%.*]], i8* noundef nonnull dereferenceable(1) [[STR2:%.*]], i32 5)
+; CHECK-NEXT: [[TEMP1:%.*]] = call i32 @strncmp(ptr noundef nonnull dereferenceable(1) [[STR1:%.*]], ptr noundef nonnull dereferenceable(1) [[STR2:%.*]], i32 5)
; CHECK-NEXT: ret i32 [[TEMP1]]
;
- %temp1 = call i32 @strncmp(i8* %str1, i8* %str2, i32 5)
+ %temp1 = call i32 @strncmp(ptr %str1, ptr %str2, i32 5)
ret i32 %temp1
}
-define i32 @test12(i8* %str1, i8* %str2, i32 %n) null_pointer_is_valid {
+define i32 @test12(ptr %str1, ptr %str2, i32 %n) null_pointer_is_valid {
; CHECK-LABEL: @test12(
-; CHECK-NEXT: [[TEMP1:%.*]] = call i32 @strncmp(i8* [[STR1:%.*]], i8* [[STR2:%.*]], i32 [[N:%.*]])
+; CHECK-NEXT: [[TEMP1:%.*]] = call i32 @strncmp(ptr [[STR1:%.*]], ptr [[STR2:%.*]], i32 [[N:%.*]])
; CHECK-NEXT: ret i32 [[TEMP1]]
;
- %temp1 = call i32 @strncmp(i8* %str1, i8* %str2, i32 %n)
+ %temp1 = call i32 @strncmp(ptr %str1, ptr %str2, i32 %n)
ret i32 %temp1
}
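; Note the contrast with @test11: under null_pointer_is_valid a null
; argument may be a legal address, so instcombine cannot add the
; nonnull/dereferenceable(1) attributes and leaves the call untouched.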
@a = constant %struct.A { [3 x i8] c"123", [4 x i8] c"1231", [4 x i8] c"2345" }
-declare i32 @strncmp(i8*, i8*, i64)
+declare i32 @strncmp(ptr, ptr, i64)
-define void @fold_strncmp_Aa_b(i32* %pcmp) {
+define void @fold_strncmp_Aa_b(ptr %pcmp) {
; CHECK-LABEL: @fold_strncmp_Aa_b(
-; CHECK-NEXT: store i32 0, i32* [[PCMP:%.*]], align 4
-; CHECK-NEXT: [[PCMP1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
-; CHECK-NEXT: store i32 0, i32* [[PCMP1]], align 4
-; CHECK-NEXT: [[PCMP2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; CHECK-NEXT: store i32 0, i32* [[PCMP2]], align 4
-; CHECK-NEXT: [[PCMP3:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; CHECK-NEXT: store i32 0, i32* [[PCMP3]], align 4
-; CHECK-NEXT: [[PCMP4:%.*]] = getelementptr i32, i32* [[PCMP]], i64 4
-; CHECK-NEXT: store i32 0, i32* [[PCMP4]], align 4
-; CHECK-NEXT: [[PCMP5:%.*]] = getelementptr i32, i32* [[PCMP]], i64 5
-; CHECK-NEXT: store i32 0, i32* [[PCMP5]], align 4
-; CHECK-NEXT: [[PCMP6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 6
-; CHECK-NEXT: store i32 0, i32* [[PCMP6]], align 4
-; CHECK-NEXT: [[PCMP7:%.*]] = getelementptr i32, i32* [[PCMP]], i64 7
-; CHECK-NEXT: store i32 -1, i32* [[PCMP7]], align 4
-; CHECK-NEXT: [[PCMP8:%.*]] = getelementptr i32, i32* [[PCMP]], i64 8
-; CHECK-NEXT: store i32 -1, i32* [[PCMP8]], align 4
-; CHECK-NEXT: [[PCMP9:%.*]] = getelementptr i32, i32* [[PCMP]], i64 9
-; CHECK-NEXT: store i32 -1, i32* [[PCMP9]], align 4
+; CHECK-NEXT: store i32 0, ptr [[PCMP:%.*]], align 4
+; CHECK-NEXT: [[PCMP1:%.*]] = getelementptr i32, ptr [[PCMP]], i64 1
+; CHECK-NEXT: store i32 0, ptr [[PCMP1]], align 4
+; CHECK-NEXT: [[PCMP2:%.*]] = getelementptr i32, ptr [[PCMP]], i64 2
+; CHECK-NEXT: store i32 0, ptr [[PCMP2]], align 4
+; CHECK-NEXT: [[PCMP3:%.*]] = getelementptr i32, ptr [[PCMP]], i64 3
+; CHECK-NEXT: store i32 0, ptr [[PCMP3]], align 4
+; CHECK-NEXT: [[PCMP4:%.*]] = getelementptr i32, ptr [[PCMP]], i64 4
+; CHECK-NEXT: store i32 0, ptr [[PCMP4]], align 4
+; CHECK-NEXT: [[PCMP5:%.*]] = getelementptr i32, ptr [[PCMP]], i64 5
+; CHECK-NEXT: store i32 0, ptr [[PCMP5]], align 4
+; CHECK-NEXT: [[PCMP6:%.*]] = getelementptr i32, ptr [[PCMP]], i64 6
+; CHECK-NEXT: store i32 0, ptr [[PCMP6]], align 4
+; CHECK-NEXT: [[PCMP7:%.*]] = getelementptr i32, ptr [[PCMP]], i64 7
+; CHECK-NEXT: store i32 -1, ptr [[PCMP7]], align 4
+; CHECK-NEXT: [[PCMP8:%.*]] = getelementptr i32, ptr [[PCMP]], i64 8
+; CHECK-NEXT: store i32 -1, ptr [[PCMP8]], align 4
+; CHECK-NEXT: [[PCMP9:%.*]] = getelementptr i32, ptr [[PCMP]], i64 9
+; CHECK-NEXT: store i32 -1, ptr [[PCMP9]], align 4
; CHECK-NEXT: ret void
;
; p1 = a.a
- %p1 = getelementptr %struct.A, %struct.A* @a, i32 0, i32 0, i32 0
; p2 = a.b
- %p2 = getelementptr %struct.A, %struct.A* @a, i32 0, i32 1, i32 0
+ %p2 = getelementptr %struct.A, ptr @a, i32 0, i32 1, i32 0
; Fold strncmp(a.a = "123", a.b = "1231", 0) to 0.
- %cmp0 = call i32 @strncmp(i8* %p1, i8* %p2, i64 0)
- %pcmp0 = getelementptr i32, i32* %pcmp, i64 0
- store i32 %cmp0, i32* %pcmp0
+ %cmp0 = call i32 @strncmp(ptr @a, ptr %p2, i64 0)
+ store i32 %cmp0, ptr %pcmp
; Fold strncmp(a.a = "123", a.b = "1231", 1) to 0.
- %cmp1 = call i32 @strncmp(i8* %p1, i8* %p2, i64 1)
- %pcmp1 = getelementptr i32, i32* %pcmp, i64 1
- store i32 %cmp1, i32* %pcmp1
+ %cmp1 = call i32 @strncmp(ptr @a, ptr %p2, i64 1)
+ %pcmp1 = getelementptr i32, ptr %pcmp, i64 1
+ store i32 %cmp1, ptr %pcmp1
; Fold strncmp(a.a = "123", a.b = "1231", 2) to 0.
- %cmp2 = call i32 @strncmp(i8* %p1, i8* %p2, i64 2)
- %pcmp2 = getelementptr i32, i32* %pcmp, i64 2
- store i32 %cmp2, i32* %pcmp2
+ %cmp2 = call i32 @strncmp(ptr @a, ptr %p2, i64 2)
+ %pcmp2 = getelementptr i32, ptr %pcmp, i64 2
+ store i32 %cmp2, ptr %pcmp2
; Fold strncmp(a.a = "123", a.b = "1231", 3) to 0.
- %cmp3 = call i32 @strncmp(i8* %p1, i8* %p2, i64 3)
- %pcmp3 = getelementptr i32, i32* %pcmp, i64 3
- store i32 %cmp3, i32* %pcmp3
+ %cmp3 = call i32 @strncmp(ptr @a, ptr %p2, i64 3)
+ %pcmp3 = getelementptr i32, ptr %pcmp, i64 3
+ store i32 %cmp3, ptr %pcmp3
; Fold strncmp(a.a = "123", a.b = "1231", 4) to 0.
; In this and the subsequent tests, reading past the end of a.a is
; strictly undefined (the read crosses from a.a into the adjacent a.b
; subobject) but handling such cases as if they were well-defined is
; simpler than trying to exclude them.
- %cmp4 = call i32 @strncmp(i8* %p1, i8* %p2, i64 4)
- %pcmp4 = getelementptr i32, i32* %pcmp, i64 4
- store i32 %cmp4, i32* %pcmp4
+ %cmp4 = call i32 @strncmp(ptr @a, ptr %p2, i64 4)
+ %pcmp4 = getelementptr i32, ptr %pcmp, i64 4
+ store i32 %cmp4, ptr %pcmp4
; Fold strncmp("123", "1231" "2", 5) to 0.
- %cmp5 = call i32 @strncmp(i8* %p1, i8* %p2, i64 5)
- %pcmp5 = getelementptr i32, i32* %pcmp, i64 5
- store i32 %cmp5, i32* %pcmp5
+ %cmp5 = call i32 @strncmp(ptr @a, ptr %p2, i64 5)
+ %pcmp5 = getelementptr i32, ptr %pcmp, i64 5
+ store i32 %cmp5, ptr %pcmp5
; Fold strncmp("123", "1231" "23", 6) to 0.
- %cmp6 = call i32 @strncmp(i8* %p1, i8* %p2, i64 6)
- %pcmp6 = getelementptr i32, i32* %pcmp, i64 6
- store i32 %cmp6, i32* %pcmp6
+ %cmp6 = call i32 @strncmp(ptr @a, ptr %p2, i64 6)
+ %pcmp6 = getelementptr i32, ptr %pcmp, i64 6
+ store i32 %cmp6, ptr %pcmp6
; Fold strncmp("123", "1231" "2345", 7) to 1.
- %cmp7 = call i32 @strncmp(i8* %p1, i8* %p2, i64 7)
- %pcmp7 = getelementptr i32, i32* %pcmp, i64 7
- store i32 %cmp7, i32* %pcmp7
+ %cmp7 = call i32 @strncmp(ptr @a, ptr %p2, i64 7)
+ %pcmp7 = getelementptr i32, ptr %pcmp, i64 7
+ store i32 %cmp7, ptr %pcmp7
; Fold strncmp("123", "1231" "2345", 8) to 1.
- %cmp8 = call i32 @strncmp(i8* %p1, i8* %p2, i64 8)
- %pcmp8 = getelementptr i32, i32* %pcmp, i64 8
- store i32 %cmp8, i32* %pcmp8
+ %cmp8 = call i32 @strncmp(ptr @a, ptr %p2, i64 8)
+ %pcmp8 = getelementptr i32, ptr %pcmp, i64 8
+ store i32 %cmp8, ptr %pcmp8
; Fold strncmp("123", "1231" "2345", 9) to 1.
- %cmp9 = call i32 @strncmp(i8* %p1, i8* %p2, i64 9)
- %pcmp9 = getelementptr i32, i32* %pcmp, i64 9
- store i32 %cmp9, i32* %pcmp9
+ %cmp9 = call i32 @strncmp(ptr @a, ptr %p2, i64 9)
+ %pcmp9 = getelementptr i32, ptr %pcmp, i64 9
+ store i32 %cmp9, ptr %pcmp9
ret void
}
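; What drives the folds above: @a's members are contiguous, so read as
; nul-less strings a.a is "123" "1231" "2345" and a.b is "1231" "2345";
; they agree on the first six characters and differ at the seventh
; ('1' vs '4'), hence 0 for n <= 6 and -1 for n >= 7.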
-define void @fold_strncmp_Ab_a(i32* %pcmp) {
+define void @fold_strncmp_Ab_a(ptr %pcmp) {
; CHECK-LABEL: @fold_strncmp_Ab_a(
-; CHECK-NEXT: store i32 0, i32* [[PCMP:%.*]], align 4
-; CHECK-NEXT: [[PCMP1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
-; CHECK-NEXT: store i32 0, i32* [[PCMP1]], align 4
-; CHECK-NEXT: [[PCMP2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; CHECK-NEXT: store i32 0, i32* [[PCMP2]], align 4
-; CHECK-NEXT: [[PCMP3:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; CHECK-NEXT: store i32 0, i32* [[PCMP3]], align 4
-; CHECK-NEXT: [[PCMP4:%.*]] = getelementptr i32, i32* [[PCMP]], i64 4
-; CHECK-NEXT: store i32 1, i32* [[PCMP4]], align 4
-; CHECK-NEXT: [[PCMP5:%.*]] = getelementptr i32, i32* [[PCMP]], i64 5
-; CHECK-NEXT: store i32 1, i32* [[PCMP5]], align 4
+; CHECK-NEXT: store i32 0, ptr [[PCMP:%.*]], align 4
+; CHECK-NEXT: [[PCMP1:%.*]] = getelementptr i32, ptr [[PCMP]], i64 1
+; CHECK-NEXT: store i32 0, ptr [[PCMP1]], align 4
+; CHECK-NEXT: [[PCMP2:%.*]] = getelementptr i32, ptr [[PCMP]], i64 2
+; CHECK-NEXT: store i32 0, ptr [[PCMP2]], align 4
+; CHECK-NEXT: [[PCMP3:%.*]] = getelementptr i32, ptr [[PCMP]], i64 3
+; CHECK-NEXT: store i32 0, ptr [[PCMP3]], align 4
+; CHECK-NEXT: [[PCMP4:%.*]] = getelementptr i32, ptr [[PCMP]], i64 4
+; CHECK-NEXT: store i32 1, ptr [[PCMP4]], align 4
+; CHECK-NEXT: [[PCMP5:%.*]] = getelementptr i32, ptr [[PCMP]], i64 5
+; CHECK-NEXT: store i32 1, ptr [[PCMP5]], align 4
; CHECK-NEXT: ret void
;
; p1 = &a.b[3]
- %p1 = getelementptr %struct.A, %struct.A* @a, i32 0, i32 1, i32 3
+ %p1 = getelementptr %struct.A, ptr @a, i32 0, i32 1, i32 3
; p2 = &a.a
- %p2 = getelementptr %struct.A, %struct.A* @a, i32 0, i32 0, i32 0
; Fold strncmp(a.a = "123", &a.b[3] = "1" "2345", 0) to 0.
- %cmp0 = call i32 @strncmp(i8* %p1, i8* %p2, i64 0)
- %pcmp0 = getelementptr i32, i32* %pcmp, i64 0
- store i32 %cmp0, i32* %pcmp0
+ %cmp0 = call i32 @strncmp(ptr %p1, ptr @a, i64 0)
+ store i32 %cmp0, ptr %pcmp
; Fold strncmp(a.a = "123", &a.b[3] = "1" "2345", 1) to 0.
- %cmp1 = call i32 @strncmp(i8* %p1, i8* %p2, i64 1)
- %pcmp1 = getelementptr i32, i32* %pcmp, i64 1
- store i32 %cmp1, i32* %pcmp1
+ %cmp1 = call i32 @strncmp(ptr %p1, ptr @a, i64 1)
+ %pcmp1 = getelementptr i32, ptr %pcmp, i64 1
+ store i32 %cmp1, ptr %pcmp1
; Fold strncmp(a.a = "123", &a.b[3] = "1" "2345", 2) to 0.
- %cmp2 = call i32 @strncmp(i8* %p1, i8* %p2, i64 2)
- %pcmp2 = getelementptr i32, i32* %pcmp, i64 2
- store i32 %cmp2, i32* %pcmp2
+ %cmp2 = call i32 @strncmp(ptr %p1, ptr @a, i64 2)
+ %pcmp2 = getelementptr i32, ptr %pcmp, i64 2
+ store i32 %cmp2, ptr %pcmp2
; Fold strncmp(a.a = "123", &a.b[3] = "1" "2345", 3) to 0.
- %cmp3 = call i32 @strncmp(i8* %p1, i8* %p2, i64 3)
- %pcmp3 = getelementptr i32, i32* %pcmp, i64 3
- store i32 %cmp3, i32* %pcmp3
+ %cmp3 = call i32 @strncmp(ptr %p1, ptr @a, i64 3)
+ %pcmp3 = getelementptr i32, ptr %pcmp, i64 3
+ store i32 %cmp3, ptr %pcmp3
; Fold strncmp(a.a = "123", &a.b[3] = "1" "2345", 4) to 1.
- %cmp4 = call i32 @strncmp(i8* %p1, i8* %p2, i64 4)
- %pcmp4 = getelementptr i32, i32* %pcmp, i64 4
- store i32 %cmp4, i32* %pcmp4
+ %cmp4 = call i32 @strncmp(ptr %p1, ptr @a, i64 4)
+ %pcmp4 = getelementptr i32, ptr %pcmp, i64 4
+ store i32 %cmp4, ptr %pcmp4
; Fold strncmp(a.a = "123", &a.b[3] = "1" "2345", 5) to 1.
- %cmp5 = call i32 @strncmp(i8* %p1, i8* %p2, i64 5)
- %pcmp5 = getelementptr i32, i32* %pcmp, i64 5
- store i32 %cmp5, i32* %pcmp5
+ %cmp5 = call i32 @strncmp(ptr %p1, ptr @a, i64 5)
+ %pcmp5 = getelementptr i32, ptr %pcmp, i64 5
+ store i32 %cmp5, ptr %pcmp5
ret void
}
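; Likewise here: &a.b[3] is the last byte of a.b ("1") followed by the
; adjacent "2345", i.e. effectively "12345"; it matches the "123" read
; from a.a for the first three characters and is greater at the fourth
; ('4' vs '1'), hence 0 for n <= 3 and +1 for n >= 4.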
; Exercise folding of strncmp calls with constant arrays including both
; negative and positive characters and both constant and nonconstant sizes.
-declare i32 @strncmp(i8*, i8*, i64)
+declare i32 @strncmp(ptr, ptr, i64)
@a = constant [7 x i8] c"abcdef\7f"
@b = constant [7 x i8] c"abcdef\80"
; Exercise strncmp(A + C, B + C, 2) folding of small arrays that differ in
; a character with the opposite sign and a constant size.
-define void @fold_strncmp_cst_cst(i32* %pcmp) {
+define void @fold_strncmp_cst_cst(ptr %pcmp) {
; CHECK-LABEL: @fold_strncmp_cst_cst(
-; CHECK-NEXT: store i32 -1, i32* [[PCMP:%.*]], align 4
-; CHECK-NEXT: [[SB5_A5:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
-; CHECK-NEXT: store i32 1, i32* [[SB5_A5]], align 4
-; CHECK-NEXT: [[SA6_B6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; CHECK-NEXT: store i32 -1, i32* [[SA6_B6]], align 4
-; CHECK-NEXT: [[SB6_A6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; CHECK-NEXT: store i32 1, i32* [[SB6_A6]], align 4
+; CHECK-NEXT: store i32 -1, ptr [[PCMP:%.*]], align 4
+; CHECK-NEXT: [[SB5_A5:%.*]] = getelementptr i32, ptr [[PCMP]], i64 1
+; CHECK-NEXT: store i32 1, ptr [[SB5_A5]], align 4
+; CHECK-NEXT: [[SA6_B6:%.*]] = getelementptr i32, ptr [[PCMP]], i64 2
+; CHECK-NEXT: store i32 -1, ptr [[SA6_B6]], align 4
+; CHECK-NEXT: [[SB6_A6:%.*]] = getelementptr i32, ptr [[PCMP]], i64 3
+; CHECK-NEXT: store i32 1, ptr [[SB6_A6]], align 4
; CHECK-NEXT: ret void
;
- %p5 = getelementptr [7 x i8], [7 x i8]* @a, i64 0, i64 5
- %p6 = getelementptr [7 x i8], [7 x i8]* @a, i64 0, i64 6
+ %p5 = getelementptr [7 x i8], ptr @a, i64 0, i64 5
+ %p6 = getelementptr [7 x i8], ptr @a, i64 0, i64 6
- %q5 = getelementptr [7 x i8], [7 x i8]* @b, i64 0, i64 5
- %q6 = getelementptr [7 x i8], [7 x i8]* @b, i64 0, i64 6
+ %q5 = getelementptr [7 x i8], ptr @b, i64 0, i64 5
+ %q6 = getelementptr [7 x i8], ptr @b, i64 0, i64 6
; Fold strncmp(a + 5, b + 5, 2) to -1.
- %ca5_b5 = call i32 @strncmp(i8* %p5, i8* %q5, i64 2)
- %sa5_b5 = getelementptr i32, i32* %pcmp, i64 0
- store i32 %ca5_b5, i32* %sa5_b5
+ %ca5_b5 = call i32 @strncmp(ptr %p5, ptr %q5, i64 2)
+ store i32 %ca5_b5, ptr %pcmp
; Fold strncmp(b + 5, a + 5, 2) to +1.
- %cb5_a5 = call i32 @strncmp(i8* %q5, i8* %p5, i64 2)
- %sb5_a5 = getelementptr i32, i32* %pcmp, i64 1
- store i32 %cb5_a5, i32* %sb5_a5
+ %cb5_a5 = call i32 @strncmp(ptr %q5, ptr %p5, i64 2)
+ %sb5_a5 = getelementptr i32, ptr %pcmp, i64 1
+ store i32 %cb5_a5, ptr %sb5_a5
; Fold strncmp(a + 6, b + 6, 1) to -1.
- %ca6_b6 = call i32 @strncmp(i8* %p6, i8* %q6, i64 1)
- %sa6_b6 = getelementptr i32, i32* %pcmp, i64 2
- store i32 %ca6_b6, i32* %sa6_b6
+ %ca6_b6 = call i32 @strncmp(ptr %p6, ptr %q6, i64 1)
+ %sa6_b6 = getelementptr i32, ptr %pcmp, i64 2
+ store i32 %ca6_b6, ptr %sa6_b6
; Fold strncmp(b + 6, a + 6, 1) to +1.
- %cb6_a6 = call i32 @strncmp(i8* %q6, i8* %p6, i64 1)
- %sb6_a6 = getelementptr i32, i32* %pcmp, i64 3
- store i32 %cb6_a6, i32* %sb6_a6
+ %cb6_a6 = call i32 @strncmp(ptr %q6, ptr %p6, i64 1)
+ %sb6_a6 = getelementptr i32, ptr %pcmp, i64 3
+ store i32 %cb6_a6, ptr %sb6_a6
ret void
}
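; The folds above hinge on strncmp comparing as unsigned char: '\7f'
; (127) is less than '\80' (128), even though as signed i8 values
; 0x7f > 0x80; hence a vs b is -1 and the swapped call is +1.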
; Exercise strncmp(A, B, N) folding of arrays that differ in a character
; with the opposite sign and a variable size
-define void @fold_strncmp_cst_var(i32* %pcmp, i64 %n) {
+define void @fold_strncmp_cst_var(ptr %pcmp, i64 %n) {
; CHECK-LABEL: @fold_strncmp_cst_var(
; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[N:%.*]], 6
; CHECK-NEXT: [[TMP2:%.*]] = sext i1 [[TMP1]] to i32
-; CHECK-NEXT: store i32 [[TMP2]], i32* [[PCMP:%.*]], align 4
+; CHECK-NEXT: store i32 [[TMP2]], ptr [[PCMP:%.*]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i64 [[N]], 6
; CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
-; CHECK-NEXT: [[SB0_A0:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
-; CHECK-NEXT: store i32 [[TMP4]], i32* [[SB0_A0]], align 4
+; CHECK-NEXT: [[SB0_A0:%.*]] = getelementptr i32, ptr [[PCMP]], i64 1
+; CHECK-NEXT: store i32 [[TMP4]], ptr [[SB0_A0]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne i64 [[N]], 0
; CHECK-NEXT: [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
-; CHECK-NEXT: [[SA6_B6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; CHECK-NEXT: store i32 [[TMP6]], i32* [[SA6_B6]], align 4
+; CHECK-NEXT: [[SA6_B6:%.*]] = getelementptr i32, ptr [[PCMP]], i64 2
+; CHECK-NEXT: store i32 [[TMP6]], ptr [[SA6_B6]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = icmp ne i64 [[N]], 0
; CHECK-NEXT: [[TMP8:%.*]] = zext i1 [[TMP7]] to i32
-; CHECK-NEXT: [[SB6_A6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; CHECK-NEXT: store i32 [[TMP8]], i32* [[SB6_A6]], align 4
+; CHECK-NEXT: [[SB6_A6:%.*]] = getelementptr i32, ptr [[PCMP]], i64 3
+; CHECK-NEXT: store i32 [[TMP8]], ptr [[SB6_A6]], align 4
; CHECK-NEXT: ret void
;
- %p0 = getelementptr [7 x i8], [7 x i8]* @a, i64 0, i64 0
- %p6 = getelementptr [7 x i8], [7 x i8]* @a, i64 0, i64 6
+ %p6 = getelementptr [7 x i8], ptr @a, i64 0, i64 6
- %q0 = getelementptr [7 x i8], [7 x i8]* @b, i64 0, i64 0
- %q6 = getelementptr [7 x i8], [7 x i8]* @b, i64 0, i64 6
+ %q6 = getelementptr [7 x i8], ptr @b, i64 0, i64 6
; Fold strncmp(a, b, n) to -1 when n > 6 (0 otherwise).
- %ca0_b0 = call i32 @strncmp(i8* %p0, i8* %q0, i64 %n)
- %sa0_b0 = getelementptr i32, i32* %pcmp, i64 0
- store i32 %ca0_b0, i32* %sa0_b0
+ %ca0_b0 = call i32 @strncmp(ptr @a, ptr @b, i64 %n)
+ store i32 %ca0_b0, ptr %pcmp
; Fold strncmp(b, a, n) to +1 when n > 6 (0 otherwise).
- %cb0_a0 = call i32 @strncmp(i8* %q0, i8* %p0, i64 %n)
- %sb0_a0 = getelementptr i32, i32* %pcmp, i64 1
- store i32 %cb0_a0, i32* %sb0_a0
+ %cb0_a0 = call i32 @strncmp(ptr @b, ptr @a, i64 %n)
+ %sb0_a0 = getelementptr i32, ptr %pcmp, i64 1
+ store i32 %cb0_a0, ptr %sb0_a0
; Fold strncmp(a + 6, b + 6, n) to -1 when n != 0 (0 otherwise).
- %ca6_b6 = call i32 @strncmp(i8* %p6, i8* %q6, i64 %n)
- %sa6_b6 = getelementptr i32, i32* %pcmp, i64 2
- store i32 %ca6_b6, i32* %sa6_b6
+ %ca6_b6 = call i32 @strncmp(ptr %p6, ptr %q6, i64 %n)
+ %sa6_b6 = getelementptr i32, ptr %pcmp, i64 2
+ store i32 %ca6_b6, ptr %sa6_b6
; Fold strncmp(b + 6, a + 6, n) to +1 when n != 0 (0 otherwise).
- %cb6_a6 = call i32 @strncmp(i8* %q6, i8* %p6, i64 %n)
- %sb6_a6 = getelementptr i32, i32* %pcmp, i64 3
- store i32 %cb6_a6, i32* %sb6_a6
+ %cb6_a6 = call i32 @strncmp(ptr %q6, ptr %p6, i64 %n)
+ %sb6_a6 = getelementptr i32, ptr %pcmp, i64 3
+ store i32 %cb6_a6, ptr %sb6_a6
ret void
}
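; A sketch of the variable-size fold above, with the CHECK temporaries
; renamed for clarity: since @a and @b agree on their first six bytes,
;   %c = call i32 @strncmp(ptr @a, ptr @b, i64 %n)
; folds to
;   %gt = icmp ugt i64 %n, 6
;   %c = sext i1 %gt to i32
; (zext for the swapped operands, and icmp ne %n, 0 for the +6 cases).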
target datalayout = "e-p:64:64:64"
-declare i32 @strncmp(i8*, i8*, i32)
+declare i32 @strncmp(ptr, ptr, i32)
-define i32 @test6(i8* %str1, i8* %str2) {
+define i32 @test6(ptr %str1, ptr %str2) {
; CHECK-LABEL: @test6(
-; CHECK: call i32 @strncmp(i8* %str1, i8* %str2, i32 1)
+; CHECK: call i32 @strncmp(ptr %str1, ptr %str2, i32 1)
- %temp1 = call i32 @strncmp(i8* %str1, i8* %str2, i32 1)
+ %temp1 = call i32 @strncmp(ptr %str1, ptr %str2, i32 1)
ret i32 %temp1
}
@str2 = constant [3 x i8] c"abc"
@str3 = constant [4 x i8] c"abcd"
-declare i8* @strncpy(i8*, i8*, i64)
+declare ptr @strncpy(ptr, ptr, i64)
-define void @fill_with_zeros(i8* %dst) {
+define void @fill_with_zeros(ptr %dst) {
; CHECK-LABEL: @fill_with_zeros(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[DST:%.*]] to i32*
-; CHECK-NEXT: store i32 97, i32* [[TMP1]], align 1
+; CHECK-NEXT: store i32 97, ptr [[DST:%.*]], align 1
; CHECK-NEXT: ret void
;
- tail call i8* @strncpy(i8* %dst, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str, i64 0, i64 0), i64 4)
+ tail call ptr @strncpy(ptr %dst, ptr @str, i64 4)
ret void
}
-define void @fill_with_zeros2(i8* %dst) {
+define void @fill_with_zeros2(ptr %dst) {
; CHECK-LABEL: @fill_with_zeros2(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[DST:%.*]] to i32*
-; CHECK-NEXT: store i32 6513249, i32* [[TMP1]], align 1
+; CHECK-NEXT: store i32 6513249, ptr [[DST:%.*]], align 1
; CHECK-NEXT: ret void
;
- tail call i8* @strncpy(i8* %dst, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @str2, i64 0, i64 0), i64 4)
+ tail call ptr @strncpy(ptr %dst, ptr @str2, i64 4)
ret void
}
-define void @fill_with_zeros3(i8* %dst) {
+define void @fill_with_zeros3(ptr %dst) {
; CHECK-LABEL: @fill_with_zeros3(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[DST:%.*]] to i32*
-; CHECK-NEXT: store i32 1684234849, i32* [[TMP1]], align 1
+; CHECK-NEXT: store i32 1684234849, ptr [[DST:%.*]], align 1
; CHECK-NEXT: ret void
;
- tail call i8* @strncpy(i8* %dst, i8* getelementptr inbounds ([4 x i8], [4 x i8]* @str3, i64 0, i64 0), i64 4)
+ tail call ptr @strncpy(ptr %dst, ptr @str3, i64 4)
ret void
}
-define void @fill_with_zeros4(i8* %dst) {
+define void @fill_with_zeros4(ptr %dst) {
; CHECK-LABEL: @fill_with_zeros4(
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 1 dereferenceable(128) [[DST:%.*]], i8* noundef nonnull align 1 dereferenceable(128) getelementptr inbounds ([129 x i8], [129 x i8]* @str.2, i64 0, i64 0), i64 128, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 1 dereferenceable(128) [[DST:%.*]], ptr noundef nonnull align 1 dereferenceable(128) @str.2, i64 128, i1 false)
; CHECK-NEXT: ret void
;
- tail call i8* @strncpy(i8* %dst, i8* getelementptr inbounds ([4 x i8], [4 x i8]* @str3, i64 0, i64 0), i64 128)
+ tail call ptr @strncpy(ptr %dst, ptr @str3, i64 128)
ret void
}
-define void @no_simplify(i8* %dst) {
+define void @no_simplify(ptr %dst) {
; CHECK-LABEL: @no_simplify(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call i8* @strncpy(i8* noundef nonnull dereferenceable(1) [[DST:%.*]], i8* noundef nonnull dereferenceable(5) getelementptr inbounds ([4 x i8], [4 x i8]* @str3, i64 0, i64 0), i64 129)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call ptr @strncpy(ptr noundef nonnull dereferenceable(1) [[DST:%.*]], ptr noundef nonnull dereferenceable(5) @str3, i64 129)
; CHECK-NEXT: ret void
;
- tail call i8* @strncpy(i8* %dst, i8* getelementptr inbounds ([4 x i8], [4 x i8]* @str3, i64 0, i64 0), i64 129)
+ tail call ptr @strncpy(ptr %dst, ptr @str3, i64 129)
ret void
}
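; The constants above spell out the fold on this little-endian target:
; an N == 4 strncpy from a short constant string becomes a single i32
; store of the first four bytes, zero-padded as needed ("abc" + nul is
; 0x00636261 = 6513249, "abcd" is 0x64636261 = 1684234849); N == 128
; becomes a memcpy from a widened constant (@str.2), while N == 129
; apparently exceeds the transform's size limit and stays a call.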
;
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-declare i8* @strncpy(i8*, i8*, i64)
+declare ptr @strncpy(ptr, ptr, i64)
; A string of length 4 but size 9 to also verify that characters after
; the nul don't affect the transformation.
@s4 = constant [9 x i8] c"1234\00567\00"
-declare void @sink(i8*, i8*)
+declare void @sink(ptr, ptr)
; Verify that exactly overlapping strncpy(D, D, N) calls are simplified
; only when N < 2.
-define void @fold_strncpy_overlap(i8* %dst, i64 %n) {
+define void @fold_strncpy_overlap(ptr %dst, i64 %n) {
; CHECK-LABEL: @fold_strncpy_overlap(
-; CHECK-NEXT: call void @sink(i8* [[DST:%.*]], i8* [[DST]])
-; CHECK-NEXT: call void @sink(i8* [[DST]], i8* [[DST]])
+; CHECK-NEXT: call void @sink(ptr [[DST:%.*]], ptr [[DST]])
+; CHECK-NEXT: call void @sink(ptr [[DST]], ptr [[DST]])
; CHECK-NEXT: ret void
;
; Fold strncpy(D, D, 0) to D.
- %ed_0 = call i8* @strncpy(i8* %dst, i8* %dst, i64 0)
- call void @sink(i8* %dst, i8* %ed_0)
+ %ed_0 = call ptr @strncpy(ptr %dst, ptr %dst, i64 0)
+ call void @sink(ptr %dst, ptr %ed_0)
; Fold strncpy(D, D, 1) to D.
- %ed_1 = call i8* @strncpy(i8* %dst, i8* %dst, i64 1)
- call void @sink(i8* %dst, i8* %ed_1)
+ %ed_1 = call ptr @strncpy(ptr %dst, ptr %dst, i64 1)
+ call void @sink(ptr %dst, ptr %ed_1)
ret void
}
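; strncpy(D, D, 0) copies nothing and strncpy(D, D, 1) copies D[0] onto
; itself, so both calls above are no-ops that fold to their return
; value D, leaving only the two @sink calls.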
; Verify that strncpy(D, D, N) calls with N > 1 are not simplified.
; Transforming them into the equivalent
; memset(D + strnlen(D, N), 0, N - strnlen(D, N))
; call is possible but there is little to gain from it.
-define void @call_strncpy_overlap(i8* %dst, i64 %n) {
+define void @call_strncpy_overlap(ptr %dst, i64 %n) {
; CHECK-LABEL: @call_strncpy_overlap(
-; CHECK-NEXT: [[ED_2:%.*]] = call i8* @strncpy(i8* noundef nonnull dereferenceable(1) [[DST:%.*]], i8* noundef nonnull dereferenceable(1) [[DST]], i64 2)
-; CHECK-NEXT: call void @sink(i8* [[DST]], i8* [[ED_2]])
-; CHECK-NEXT: [[ED_3:%.*]] = call i8* @strncpy(i8* noundef nonnull dereferenceable(1) [[DST]], i8* noundef nonnull dereferenceable(1) [[DST]], i64 3)
-; CHECK-NEXT: call void @sink(i8* [[DST]], i8* [[ED_3]])
-; CHECK-NEXT: [[ED_N:%.*]] = call i8* @strncpy(i8* [[DST]], i8* [[DST]], i64 [[N:%.*]])
-; CHECK-NEXT: call void @sink(i8* [[DST]], i8* [[ED_N]])
+; CHECK-NEXT: [[ED_2:%.*]] = call ptr @strncpy(ptr noundef nonnull dereferenceable(1) [[DST:%.*]], ptr noundef nonnull dereferenceable(1) [[DST]], i64 2)
+; CHECK-NEXT: call void @sink(ptr [[DST]], ptr [[ED_2]])
+; CHECK-NEXT: [[ED_3:%.*]] = call ptr @strncpy(ptr noundef nonnull dereferenceable(1) [[DST]], ptr noundef nonnull dereferenceable(1) [[DST]], i64 3)
+; CHECK-NEXT: call void @sink(ptr [[DST]], ptr [[ED_3]])
+; CHECK-NEXT: [[ED_N:%.*]] = call ptr @strncpy(ptr [[DST]], ptr [[DST]], i64 [[N:%.*]])
+; CHECK-NEXT: call void @sink(ptr [[DST]], ptr [[ED_N]])
; CHECK-NEXT: ret void
;
; Do not transform strncpy(D, D, 2).
- %ed_2 = call i8* @strncpy(i8* %dst, i8* %dst, i64 2)
- call void @sink(i8* %dst, i8* %ed_2)
+ %ed_2 = call ptr @strncpy(ptr %dst, ptr %dst, i64 2)
+ call void @sink(ptr %dst, ptr %ed_2)
; Do not transform strncpy(D, D, 3).
- %ed_3 = call i8* @strncpy(i8* %dst, i8* %dst, i64 3)
- call void @sink(i8* %dst, i8* %ed_3)
+ %ed_3 = call ptr @strncpy(ptr %dst, ptr %dst, i64 3)
+ call void @sink(ptr %dst, ptr %ed_3)
; Do not transform strncpy(D, D, N).
- %ed_n = call i8* @strncpy(i8* %dst, i8* %dst, i64 %n)
- call void @sink(i8* %dst, i8* %ed_n)
+ %ed_n = call ptr @strncpy(ptr %dst, ptr %dst, i64 %n)
+ call void @sink(ptr %dst, ptr %ed_n)
ret void
}
; Verify that strncpy(D, "", N) calls are transformed to memset(D, 0, N).
-define void @fold_strncpy_s0(i8* %dst, i64 %n) {
+define void @fold_strncpy_s0(ptr %dst, i64 %n) {
; CHECK-LABEL: @fold_strncpy_s0(
-; CHECK-NEXT: call void @sink(i8* [[DST:%.*]], i8* [[DST]])
-; CHECK-NEXT: store i8 0, i8* [[DST]], align 1
-; CHECK-NEXT: call void @sink(i8* nonnull [[DST]], i8* nonnull [[DST]])
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[DST]] to i16*
-; CHECK-NEXT: store i16 0, i16* [[TMP1]], align 1
-; CHECK-NEXT: call void @sink(i8* nonnull [[DST]], i8* nonnull [[DST]])
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 1 dereferenceable(9) [[DST]], i8 0, i64 9, i1 false)
-; CHECK-NEXT: call void @sink(i8* nonnull [[DST]], i8* nonnull [[DST]])
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* nonnull align 1 [[DST]], i8 0, i64 [[N:%.*]], i1 false)
-; CHECK-NEXT: call void @sink(i8* nonnull [[DST]], i8* nonnull [[DST]])
+; CHECK-NEXT: call void @sink(ptr [[DST:%.*]], ptr [[DST]])
+; CHECK-NEXT: store i8 0, ptr [[DST]], align 1
+; CHECK-NEXT: call void @sink(ptr nonnull [[DST]], ptr nonnull [[DST]])
+; CHECK-NEXT: store i16 0, ptr [[DST]], align 1
+; CHECK-NEXT: call void @sink(ptr nonnull [[DST]], ptr nonnull [[DST]])
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 1 dereferenceable(9) [[DST]], i8 0, i64 9, i1 false)
+; CHECK-NEXT: call void @sink(ptr nonnull [[DST]], ptr nonnull [[DST]])
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr nonnull align 1 [[DST]], i8 0, i64 [[N:%.*]], i1 false)
+; CHECK-NEXT: call void @sink(ptr nonnull [[DST]], ptr nonnull [[DST]])
; CHECK-NEXT: ret void
;
- %ps0 = getelementptr [9 x i8], [9 x i8]* @s4, i32 0, i32 4
+ %ps0 = getelementptr [9 x i8], ptr @s4, i32 0, i32 4
; Fold strncpy(D, "", 0) to just D.
- %es0_0 = call i8* @strncpy(i8* %dst, i8* %ps0, i64 0)
- call void @sink(i8* %dst, i8* %es0_0)
+ %es0_0 = call ptr @strncpy(ptr %dst, ptr %ps0, i64 0)
+ call void @sink(ptr %dst, ptr %es0_0)
; Transform strncpy(D, "", 1) to *D = '\0, D.
- %es0_1 = call i8* @strncpy(i8* %dst, i8* %ps0, i64 1)
- call void @sink(i8* %dst, i8* %es0_1)
+ %es0_1 = call ptr @strncpy(ptr %dst, ptr %ps0, i64 1)
+ call void @sink(ptr %dst, ptr %es0_1)
; Transform strncpy(D, "", 2) to memset(D, 0, 2), D.
- %es0_2 = call i8* @strncpy(i8* %dst, i8* %ps0, i64 2)
- call void @sink(i8* %dst, i8* %es0_2)
+ %es0_2 = call ptr @strncpy(ptr %dst, ptr %ps0, i64 2)
+ call void @sink(ptr %dst, ptr %es0_2)
; Transform strncpy(D, "", 9) to memset(D, 0, 9), D.
- %es0_9 = call i8* @strncpy(i8* %dst, i8* %ps0, i64 9)
- call void @sink(i8* %dst, i8* %es0_9)
+ %es0_9 = call ptr @strncpy(ptr %dst, ptr %ps0, i64 9)
+ call void @sink(ptr %dst, ptr %es0_9)
; Transform strncpy(D, "", n) to memset(D, 0, n), D.
- %es0_n = call i8* @strncpy(i8* %dst, i8* %ps0, i64 %n)
- call void @sink(i8* %dst, i8* %es0_n)
+ %es0_n = call ptr @strncpy(ptr %dst, ptr %ps0, i64 %n)
+ call void @sink(ptr %dst, ptr %es0_n)
ret void
}
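; A C sketch of the memset equivalence exercised above (hypothetical helper
; name; illustrative only): with an empty source, strncpy stores N NULs and
; returns the destination.
;
;   #include <string.h>
;   char *sketch_strncpy_s0(char *d, size_t n) {
;     memset(d, 0, n);   // strncpy(d, "", n) == (memset(d, 0, n), d)
;     return d;
;   }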
; Verify that strncpy(D, S, N) calls with nonconstant source S and constant
; size are simplified when N < 2.
-define void @fold_strncpy_s(i8* %dst, i8* %src, i64 %n) {
+define void @fold_strncpy_s(ptr %dst, ptr %src, i64 %n) {
; CHECK-LABEL: @fold_strncpy_s(
-; CHECK-NEXT: call void @sink(i8* [[DST:%.*]], i8* [[DST]])
-; CHECK-NEXT: [[STXNCPY_CHAR0:%.*]] = load i8, i8* [[SRC:%.*]], align 1
-; CHECK-NEXT: store i8 [[STXNCPY_CHAR0]], i8* [[DST]], align 1
-; CHECK-NEXT: call void @sink(i8* nonnull [[DST]], i8* nonnull [[DST]])
+; CHECK-NEXT: call void @sink(ptr [[DST:%.*]], ptr [[DST]])
+; CHECK-NEXT: [[STXNCPY_CHAR0:%.*]] = load i8, ptr [[SRC:%.*]], align 1
+; CHECK-NEXT: store i8 [[STXNCPY_CHAR0]], ptr [[DST]], align 1
+; CHECK-NEXT: call void @sink(ptr nonnull [[DST]], ptr nonnull [[DST]])
; CHECK-NEXT: ret void
;
; Fold strncpy(D, S, 0) to just D.
- %ed_0 = call i8* @strncpy(i8* %dst, i8* %src, i64 0)
- call void @sink(i8* %dst, i8* %ed_0)
+ %ed_0 = call ptr @strncpy(ptr %dst, ptr %src, i64 0)
+ call void @sink(ptr %dst, ptr %ed_0)
; Transform strncpy(D, S, 1) to *D = *S, D.
- %ed_1 = call i8* @strncpy(i8* %dst, i8* %src, i64 1)
- call void @sink(i8* %dst, i8* %ed_1)
+ %ed_1 = call ptr @strncpy(ptr %dst, ptr %src, i64 1)
+ call void @sink(ptr %dst, ptr %ed_1)
ret void
}
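; A C sketch of the N == 1 fold above (hypothetical helper name;
; illustrative only): only the first source character is copied, and the
; result is the destination pointer.
;
;   char *sketch_strncpy_s1(char *d, const char *s) {
;     *d = *s;   // strncpy(d, s, 1) == (*d = *s, d)
;     return d;
;   }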
; Also verify that the arguments of the call are annotated with the right
; attributes.
-define void @call_strncpy_s(i8* %dst, i8* %src, i64 %n) {
+define void @call_strncpy_s(ptr %dst, ptr %src, i64 %n) {
; CHECK-LABEL: @call_strncpy_s(
-; CHECK-NEXT: [[ED_2:%.*]] = call i8* @strncpy(i8* noundef nonnull dereferenceable(1) [[DST:%.*]], i8* noundef nonnull dereferenceable(1) [[SRC:%.*]], i64 2)
-; CHECK-NEXT: call void @sink(i8* [[DST]], i8* [[ED_2]])
-; CHECK-NEXT: [[ED_9:%.*]] = call i8* @strncpy(i8* noundef nonnull dereferenceable(1) [[DST]], i8* noundef nonnull dereferenceable(1) [[SRC]], i64 9)
-; CHECK-NEXT: call void @sink(i8* [[DST]], i8* [[ED_9]])
-; CHECK-NEXT: [[ED_N:%.*]] = call i8* @strncpy(i8* [[DST]], i8* [[SRC]], i64 [[N:%.*]])
-; CHECK-NEXT: call void @sink(i8* [[DST]], i8* [[ED_N]])
+; CHECK-NEXT: [[ED_2:%.*]] = call ptr @strncpy(ptr noundef nonnull dereferenceable(1) [[DST:%.*]], ptr noundef nonnull dereferenceable(1) [[SRC:%.*]], i64 2)
+; CHECK-NEXT: call void @sink(ptr [[DST]], ptr [[ED_2]])
+; CHECK-NEXT: [[ED_9:%.*]] = call ptr @strncpy(ptr noundef nonnull dereferenceable(1) [[DST]], ptr noundef nonnull dereferenceable(1) [[SRC]], i64 9)
+; CHECK-NEXT: call void @sink(ptr [[DST]], ptr [[ED_9]])
+; CHECK-NEXT: [[ED_N:%.*]] = call ptr @strncpy(ptr [[DST]], ptr [[SRC]], i64 [[N:%.*]])
+; CHECK-NEXT: call void @sink(ptr [[DST]], ptr [[ED_N]])
; CHECK-NEXT: ret void
;
; Do not transform strncpy(D, S, 2) when S is unknown. Both *D and *S must
; be dereferenceable but neither D[1] nor S[1] need be.
- %ed_2 = call i8* @strncpy(i8* %dst, i8* %src, i64 2)
- call void @sink(i8* %dst, i8* %ed_2)
+ %ed_2 = call ptr @strncpy(ptr %dst, ptr %src, i64 2)
+ call void @sink(ptr %dst, ptr %ed_2)
; Do not transform strncpy(D, S, 9) when S is unknown.
- %ed_9 = call i8* @strncpy(i8* %dst, i8* %src, i64 9)
- call void @sink(i8* %dst, i8* %ed_9)
+ %ed_9 = call ptr @strncpy(ptr %dst, ptr %src, i64 9)
+ call void @sink(ptr %dst, ptr %ed_9)
; Do not transform strncpy(D, S, N) when all arguments are unknown. Both
; D and S must be nonnull but neither *D nor *S need be dereferenceable.
; TODO: Both D and S should be annotated nonnull and noundef regardless
; of the value of N. See https://reviews.llvm.org/D124633.
- %ed_n = call i8* @strncpy(i8* %dst, i8* %src, i64 %n)
- call void @sink(i8* %dst, i8* %ed_n)
+ %ed_n = call ptr @strncpy(ptr %dst, ptr %src, i64 %n)
+ call void @sink(ptr %dst, ptr %ed_n)
ret void
}
define void @test_no_simplify() {
; CHECK-LABEL: @test_no_simplify(
- %dst = getelementptr inbounds [60 x i16], [60 x i16]* @a, i32 0, i32 0
- %src = getelementptr inbounds [60 x i8], [60 x i8]* @b, i32 0, i32 0
; CHECK-NEXT: call i16 @__strncpy_chk
- call i16 @__strncpy_chk(i16* %dst, i8* %src, i32 60, i32 60)
+ call i16 @__strncpy_chk(ptr @a, ptr @b, i32 60, i32 60)
ret void
}
-declare i16 @__strncpy_chk(i16*, i8*, i32, i32)
+declare i16 @__strncpy_chk(ptr, ptr, i32, i32)
;
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-declare i64 @strnlen(i8*, i64)
+declare i64 @strnlen(ptr, i64)
@ax = external global [0 x i8]
@s5 = constant [6 x i8] c"12345\00"
; Verify that the strnlen pointer argument is not annotated nonnull when
; nothing is known about the bound.
-define i64 @no_access_strnlen_p_n(i8* %ptr, i64 %n) {
+define i64 @no_access_strnlen_p_n(ptr %ptr, i64 %n) {
; CHECK-LABEL: @no_access_strnlen_p_n(
-; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(i8* [[PTR:%.*]], i64 [[N:%.*]])
+; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr [[PTR:%.*]], i64 [[N:%.*]])
; CHECK-NEXT: ret i64 [[LEN]]
;
- %len = call i64 @strnlen(i8* %ptr, i64 %n)
+ %len = call i64 @strnlen(ptr %ptr, i64 %n)
ret i64 %len
}
; Verify that the strnlen pointer argument is annotated dereferenceable(1)
; (and not more) when the constant bound is greater than 1.
-define i64 @access_strnlen_p_2(i8* %ptr) {
+define i64 @access_strnlen_p_2(ptr %ptr) {
; CHECK-LABEL: @access_strnlen_p_2(
-; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(i8* noundef nonnull dereferenceable(1) [[PTR:%.*]], i64 2)
+; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr noundef nonnull dereferenceable(1) [[PTR:%.*]], i64 2)
; CHECK-NEXT: ret i64 [[LEN]]
;
- %len = call i64 @strnlen(i8* noundef nonnull dereferenceable(1) %ptr, i64 2)
+ %len = call i64 @strnlen(ptr noundef nonnull dereferenceable(1) %ptr, i64 2)
ret i64 %len
}
; Verify that the strnlen pointer argument is annotated nonnull etc.,
; when the bound is known to be nonzero.
-define i64 @access_strnlen_p_nz(i8* %ptr, i64 %n) {
+define i64 @access_strnlen_p_nz(ptr %ptr, i64 %n) {
; CHECK-LABEL: @access_strnlen_p_nz(
; CHECK-NEXT: [[NNZ:%.*]] = or i64 [[N:%.*]], 1
-; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(i8* noundef nonnull dereferenceable(1) [[PTR:%.*]], i64 [[NNZ]])
+; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr noundef nonnull dereferenceable(1) [[PTR:%.*]], i64 [[NNZ]])
; CHECK-NEXT: ret i64 [[LEN]]
;
%nnz = or i64 %n, 1
- %len = call i64 @strnlen(i8* noundef nonnull dereferenceable(1) %ptr, i64 %nnz)
+ %len = call i64 @strnlen(ptr noundef nonnull dereferenceable(1) %ptr, i64 %nnz)
ret i64 %len
}
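; The reasoning behind the annotation above, sketched in C (hypothetical
; helper name; illustrative only): once the bound is known to be nonzero,
; strnlen must examine at least the first byte, so the pointer has to be
; valid for a one-byte read.
;
;   #include <string.h>
;   size_t sketch_strnlen_nz(const char *p, size_t n) {
;     n |= 1;                 // n is now provably nonzero
;     return strnlen(p, n);   // must read p[0], hence dereferenceable(1)
;   }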
define i64 @fold_strnlen_ax_0() {
; CHECK-LABEL: @fold_strnlen_ax_0(
; CHECK-NEXT: ret i64 0
;
- %ptr = getelementptr [0 x i8], [0 x i8]* @ax, i32 0, i32 0
- %len = call i64 @strnlen(i8* %ptr, i64 0)
+ %len = call i64 @strnlen(ptr @ax, i64 0)
ret i64 %len
}
define i64 @fold_strnlen_ax_1() {
; CHECK-LABEL: @fold_strnlen_ax_1(
-; CHECK-NEXT: [[STRNLEN_CHAR0:%.*]] = load i8, i8* getelementptr inbounds ([0 x i8], [0 x i8]* @ax, i64 0, i64 0), align 1
+; CHECK-NEXT: [[STRNLEN_CHAR0:%.*]] = load i8, ptr @ax, align 1
; CHECK-NEXT: [[STRNLEN_CHAR0CMP:%.*]] = icmp ne i8 [[STRNLEN_CHAR0]], 0
; CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[STRNLEN_CHAR0CMP]] to i64
; CHECK-NEXT: ret i64 [[TMP1]]
;
- %ptr = getelementptr [0 x i8], [0 x i8]* @ax, i32 0, i32 0
- %len = call i64 @strnlen(i8* %ptr, i64 1)
+ %len = call i64 @strnlen(ptr @ax, i64 1)
ret i64 %len
}
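; A C sketch of the bound-of-one fold above (hypothetical helper name;
; illustrative only): the length is 1 exactly when the first byte is
; nonzero.
;
;   #include <stddef.h>
;   size_t sketch_strnlen_1(const char *s) {
;     return s[0] != 0;   // strnlen(s, 1) == (s[0] != 0)
;   }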
define i64 @fold_strnlen_s5_0() {
; CHECK-LABEL: @fold_strnlen_s5_0(
; CHECK-NEXT: ret i64 0
;
- %ptr = getelementptr [6 x i8], [6 x i8]* @s5, i32 0, i32 0
- %len = call i64 @strnlen(i8* %ptr, i64 0)
+ %len = call i64 @strnlen(ptr @s5, i64 0)
ret i64 %len
}
define i64 @fold_strnlen_s5_4() {
; CHECK-LABEL: @fold_strnlen_s5_4(
; CHECK-NEXT: ret i64 4
;
- %ptr = getelementptr [6 x i8], [6 x i8]* @s5, i32 0, i32 0
- %len = call i64 @strnlen(i8* %ptr, i64 4)
+ %len = call i64 @strnlen(ptr @s5, i64 4)
ret i64 %len
}
define i64 @fold_strnlen_s5_5() {
; CHECK-LABEL: @fold_strnlen_s5_5(
; CHECK-NEXT: ret i64 5
;
- %ptr = getelementptr [6 x i8], [6 x i8]* @s5, i32 0, i32 0
- %len = call i64 @strnlen(i8* %ptr, i64 5)
+ %len = call i64 @strnlen(ptr @s5, i64 5)
ret i64 %len
}
define i64 @fold_strnlen_s5_m1() {
; CHECK-LABEL: @fold_strnlen_s5_m1(
; CHECK-NEXT: ret i64 5
;
- %ptr = getelementptr [6 x i8], [6 x i8]* @s5, i32 0, i32 0
- %len = call i64 @strnlen(i8* %ptr, i64 -1)
+ %len = call i64 @strnlen(ptr @s5, i64 -1)
ret i64 %len
}
define i64 @fold_strnlen_s5_3_p4_5() {
; CHECK-LABEL: @fold_strnlen_s5_3_p4_5(
; CHECK-NEXT: ret i64 1
;
- %ptr = getelementptr [9 x i8], [9 x i8]* @s5_3, i32 0, i32 4
- %len = call i64 @strnlen(i8* %ptr, i64 5)
+ %ptr = getelementptr [9 x i8], ptr @s5_3, i32 0, i32 4
+ %len = call i64 @strnlen(ptr %ptr, i64 5)
ret i64 %len
}
define i64 @fold_strnlen_s5_3_p5_5() {
; CHECK-LABEL: @fold_strnlen_s5_3_p5_5(
; CHECK-NEXT: ret i64 0
;
- %ptr = getelementptr [9 x i8], [9 x i8]* @s5_3, i32 0, i32 5
- %len = call i64 @strnlen(i8* %ptr, i64 5)
+ %ptr = getelementptr [9 x i8], ptr @s5_3, i32 0, i32 5
+ %len = call i64 @strnlen(ptr %ptr, i64 5)
ret i64 %len
}
define i64 @fold_strnlen_s5_3_p6_3() {
; CHECK-LABEL: @fold_strnlen_s5_3_p6_3(
; CHECK-NEXT: ret i64 3
;
- %ptr = getelementptr [9 x i8], [9 x i8]* @s5_3, i32 0, i32 6
- %len = call i64 @strnlen(i8* %ptr, i64 3)
+ %ptr = getelementptr [9 x i8], ptr @s5_3, i32 0, i32 6
+ %len = call i64 @strnlen(ptr %ptr, i64 3)
ret i64 %len
}
define i64 @call_strnlen_s5_3_p6_4() {
; CHECK-LABEL: @call_strnlen_s5_3_p6_4(
; CHECK-NEXT: ret i64 3
;
- %ptr = getelementptr [9 x i8], [9 x i8]* @s5_3, i32 0, i32 6
- %len = call i64 @strnlen(i8* %ptr, i64 4)
+ %ptr = getelementptr [9 x i8], ptr @s5_3, i32 0, i32 6
+ %len = call i64 @strnlen(ptr %ptr, i64 4)
ret i64 %len
}
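; The offset folds above are consistent with @s5_3 holding "12345" and
; "123" back to back; its initializer is not shown in this excerpt, so the
; C sketch below is an assumption made purely for illustration.
;
;   #include <assert.h>
;   #include <string.h>
;   static const char s5_3[9] = {'1','2','3','4','5','\0','1','2','3'};
;   void sketch_s5_3(void) {
;     assert(strnlen(s5_3 + 4, 5) == 1);   // "5"
;     assert(strnlen(s5_3 + 5, 5) == 0);   // ""
;     assert(strnlen(s5_3 + 6, 3) == 3);   // no NUL within the bound
;   }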
;
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-declare i64 @strnlen(i8*, i64)
+declare i64 @strnlen(ptr, i64)
@sx = external global [0 x i8]
@a3 = constant [3 x i8] c"123"
; CHECK-NEXT: ret i64 0
;
- %ptr = getelementptr [0 x i8], [0 x i8]* @sx, i64 0, i64 %i
- %len = call i64 @strnlen(i8* %ptr, i64 0)
+ %ptr = getelementptr [0 x i8], ptr @sx, i64 0, i64 %i
+ %len = call i64 @strnlen(ptr %ptr, i64 0)
ret i64 %len
}
define i64 @call_strnlen_sx_pi_n(i64 %i, i64 %n) {
; CHECK-LABEL: @call_strnlen_sx_pi_n(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [0 x i8], [0 x i8]* @sx, i64 0, i64 [[I:%.*]]
-; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(i8* nonnull [[PTR]], i64 [[N:%.*]])
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [0 x i8], ptr @sx, i64 0, i64 [[I:%.*]]
+; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr nonnull [[PTR]], i64 [[N:%.*]])
; CHECK-NEXT: ret i64 [[LEN]]
;
- %ptr = getelementptr inbounds [0 x i8], [0 x i8]* @sx, i64 0, i64 %i
- %len = call i64 @strnlen(i8* %ptr, i64 %n)
+ %ptr = getelementptr inbounds [0 x i8], ptr @sx, i64 0, i64 %i
+ %len = call i64 @strnlen(ptr %ptr, i64 %n)
ret i64 %len
}
define i64 @call_strnlen_a3_pi_2(i64 %i) {
; CHECK-LABEL: @call_strnlen_a3_pi_2(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [3 x i8], [3 x i8]* @a3, i64 0, i64 [[I:%.*]]
-; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(i8* noundef nonnull [[PTR]], i64 2)
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [3 x i8], ptr @a3, i64 0, i64 [[I:%.*]]
+; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr noundef nonnull [[PTR]], i64 2)
; CHECK-NEXT: ret i64 [[LEN]]
;
- %ptr = getelementptr inbounds [3 x i8], [3 x i8]* @a3, i64 0, i64 %i
- %len = call i64 @strnlen(i8* %ptr, i64 2)
+ %ptr = getelementptr inbounds [3 x i8], ptr @a3, i64 0, i64 %i
+ %len = call i64 @strnlen(ptr %ptr, i64 2)
ret i64 %len
}
define i64 @call_strnlen_a3_pi_3(i64 %i) {
; CHECK-LABEL: @call_strnlen_a3_pi_3(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [3 x i8], [3 x i8]* @a3, i64 0, i64 [[I:%.*]]
-; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(i8* noundef nonnull [[PTR]], i64 3)
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [3 x i8], ptr @a3, i64 0, i64 [[I:%.*]]
+; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr noundef nonnull [[PTR]], i64 3)
; CHECK-NEXT: ret i64 [[LEN]]
;
- %ptr = getelementptr inbounds [3 x i8], [3 x i8]* @a3, i64 0, i64 %i
- %len = call i64 @strnlen(i8* %ptr, i64 3)
+ %ptr = getelementptr inbounds [3 x i8], ptr @a3, i64 0, i64 %i
+ %len = call i64 @strnlen(ptr %ptr, i64 3)
ret i64 %len
}
define i64 @fold_strnlen_s3_pi_0(i64 %i) {
; CHECK-LABEL: @fold_strnlen_s3_pi_0(
; CHECK-NEXT: ret i64 0
;
- %ptr = getelementptr inbounds [4 x i8], [4 x i8]* @s3, i64 0, i64 %i
- %len = call i64 @strnlen(i8* %ptr, i64 0)
+ %ptr = getelementptr inbounds [4 x i8], ptr @s3, i64 0, i64 %i
+ %len = call i64 @strnlen(ptr %ptr, i64 0)
ret i64 %len
}
define i64 @call_strnlen_s5_pi_0(i64 %i) {
; CHECK-LABEL: @call_strnlen_s5_pi_0(
; CHECK-NEXT: ret i64 0
;
- %ptr = getelementptr [6 x i8], [6 x i8]* @s5, i32 0, i32 0
- %len = call i64 @strnlen(i8* %ptr, i64 0)
+ %len = call i64 @strnlen(ptr @s5, i64 0)
ret i64 %len
}
define i64 @fold_strnlen_s5_3_pi_0(i64 %i) {
; CHECK-LABEL: @fold_strnlen_s5_3_pi_0(
; CHECK-NEXT: ret i64 0
;
- %ptr = getelementptr [10 x i8], [10 x i8]* @s5_3, i32 0, i64 %i
- %len = call i64 @strnlen(i8* %ptr, i64 0)
+ %ptr = getelementptr [10 x i8], ptr @s5_3, i32 0, i64 %i
+ %len = call i64 @strnlen(ptr %ptr, i64 0)
ret i64 %len
}
define i64 @call_strnlen_s5_3_pi_n(i64 zeroext %i, i64 %n) {
; CHECK-LABEL: @call_strnlen_s5_3_pi_n(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [10 x i8], [10 x i8]* @s5_3, i64 0, i64 [[I:%.*]]
-; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(i8* nonnull [[PTR]], i64 [[N:%.*]])
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [10 x i8], ptr @s5_3, i64 0, i64 [[I:%.*]]
+; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr nonnull [[PTR]], i64 [[N:%.*]])
; CHECK-NEXT: ret i64 [[LEN]]
;
- %ptr = getelementptr inbounds [10 x i8], [10 x i8]* @s5_3, i32 0, i64 %i
- %len = call i64 @strnlen(i8* %ptr, i64 %n)
+ %ptr = getelementptr inbounds [10 x i8], ptr @s5_3, i32 0, i64 %i
+ %len = call i64 @strnlen(ptr %ptr, i64 %n)
ret i64 %len
}
; CHECK-NEXT: ret i64 [[TMP1]]
;
- %ptr = getelementptr [3 x i8], [3 x i8]* @a3, i64 0, i64 0
- %len = call i64 @strnlen(i8* %ptr, i64 %n)
+ %len = call i64 @strnlen(ptr @a3, i64 %n)
ret i64 %len
}
; CHECK-NEXT: ret i64 [[TMP1]]
;
- %ptr = getelementptr [4 x i8], [4 x i8]* @s3, i64 0, i64 0
- %len = call i64 @strnlen(i8* %ptr, i64 %n)
+ %len = call i64 @strnlen(ptr @s3, i64 %n)
ret i64 %len
}
define i64 @fold_strnlen_a3_pi_2(i64 %i) {
; CHECK-LABEL: @fold_strnlen_a3_pi_2(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [3 x i8], [3 x i8]* @a3, i64 0, i64 [[I:%.*]]
-; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(i8* noundef nonnull [[PTR]], i64 2)
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [3 x i8], ptr @a3, i64 0, i64 [[I:%.*]]
+; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr noundef nonnull [[PTR]], i64 2)
; CHECK-NEXT: ret i64 [[LEN]]
;
- %ptr = getelementptr inbounds [3 x i8], [3 x i8]* @a3, i64 0, i64 %i
- %len = call i64 @strnlen(i8* %ptr, i64 2)
+ %ptr = getelementptr inbounds [3 x i8], ptr @a3, i64 0, i64 %i
+ %len = call i64 @strnlen(ptr %ptr, i64 2)
ret i64 %len
}
define i64 @fold_strnlen_s3_pi_2(i64 %i) {
; CHECK-LABEL: @fold_strnlen_s3_pi_2(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [4 x i8], [4 x i8]* @s3, i64 0, i64 [[I:%.*]]
-; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(i8* noundef nonnull [[PTR]], i64 2)
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [4 x i8], ptr @s3, i64 0, i64 [[I:%.*]]
+; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr noundef nonnull [[PTR]], i64 2)
; CHECK-NEXT: ret i64 [[LEN]]
;
- %ptr = getelementptr inbounds [4 x i8], [4 x i8]* @s3, i64 0, i64 %i
- %len = call i64 @strnlen(i8* %ptr, i64 2)
+ %ptr = getelementptr inbounds [4 x i8], ptr @s3, i64 0, i64 %i
+ %len = call i64 @strnlen(ptr %ptr, i64 2)
ret i64 %len
}
define i64 @fold_strnlen_s3_pi_3(i64 %i) {
; CHECK-LABEL: @fold_strnlen_s3_pi_3(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [4 x i8], [4 x i8]* @s3, i64 0, i64 [[I:%.*]]
-; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(i8* noundef nonnull [[PTR]], i64 3)
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [4 x i8], ptr @s3, i64 0, i64 [[I:%.*]]
+; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr noundef nonnull [[PTR]], i64 3)
; CHECK-NEXT: ret i64 [[LEN]]
;
- %ptr = getelementptr inbounds [4 x i8], [4 x i8]* @s3, i64 0, i64 %i
- %len = call i64 @strnlen(i8* %ptr, i64 3)
+ %ptr = getelementptr inbounds [4 x i8], ptr @s3, i64 0, i64 %i
+ %len = call i64 @strnlen(ptr %ptr, i64 3)
ret i64 %len
}
define i64 @fold_strnlen_s3_pi_n(i64 %i, i64 %n) {
; CHECK-LABEL: @fold_strnlen_s3_pi_n(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [4 x i8], [4 x i8]* @s3, i64 0, i64 [[I:%.*]]
-; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(i8* nonnull [[PTR]], i64 [[N:%.*]])
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [4 x i8], ptr @s3, i64 0, i64 [[I:%.*]]
+; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr nonnull [[PTR]], i64 [[N:%.*]])
; CHECK-NEXT: ret i64 [[LEN]]
;
- %ptr = getelementptr inbounds [4 x i8], [4 x i8]* @s3, i64 0, i64 %i
- %len = call i64 @strnlen(i8* %ptr, i64 %n)
+ %ptr = getelementptr inbounds [4 x i8], ptr @s3, i64 0, i64 %i
+ %len = call i64 @strnlen(ptr %ptr, i64 %n)
ret i64 %len
}
define i64 @call_strnlen_s5_3_pi_2(i64 %i) {
; CHECK-LABEL: @call_strnlen_s5_3_pi_2(
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [10 x i8], [10 x i8]* @s5_3, i64 0, i64 [[I:%.*]]
-; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(i8* noundef nonnull [[PTR]], i64 2)
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [10 x i8], ptr @s5_3, i64 0, i64 [[I:%.*]]
+; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr noundef nonnull [[PTR]], i64 2)
; CHECK-NEXT: ret i64 [[LEN]]
;
- %ptr = getelementptr inbounds [10 x i8], [10 x i8]* @s5_3, i64 0, i64 %i
- %len = call i64 @strnlen(i8* %ptr, i64 2)
+ %ptr = getelementptr inbounds [10 x i8], ptr @s5_3, i64 0, i64 %i
+ %len = call i64 @strnlen(ptr %ptr, i64 2)
ret i64 %len
}
;
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-declare i64 @strnlen(i8*, i64)
+declare i64 @strnlen(ptr, i64)
-@ecp = external global i8*, align 8
+@ecp = external global ptr, align 8
; Annotate strnlen(ecp, 3) call with noundef, nonnull, and dereferenceable
define i64 @deref_strnlen_ecp_3() {
; CHECK-LABEL: @deref_strnlen_ecp_3(
-; CHECK-NEXT: [[PTR:%.*]] = load i8*, i8** @ecp, align 8
-; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(i8* noundef nonnull dereferenceable(1) [[PTR]], i64 3)
+; CHECK-NEXT: [[PTR:%.*]] = load ptr, ptr @ecp, align 8
+; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr noundef nonnull dereferenceable(1) [[PTR]], i64 3)
; CHECK-NEXT: ret i64 [[LEN]]
;
- %ptr = load i8*, i8** @ecp
- %len = call i64 @strnlen(i8* %ptr, i64 3)
+ %ptr = load ptr, ptr @ecp
+ %len = call i64 @strnlen(ptr %ptr, i64 3)
ret i64 %len
}
define i64 @deref_strnlen_ecp_nz(i64 %n) {
; CHECK-LABEL: @deref_strnlen_ecp_nz(
; CHECK-NEXT: [[NONZERO:%.*]] = or i64 [[N:%.*]], 1
-; CHECK-NEXT: [[PTR:%.*]] = load i8*, i8** @ecp, align 8
-; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(i8* noundef nonnull dereferenceable(1) [[PTR]], i64 [[NONZERO]])
+; CHECK-NEXT: [[PTR:%.*]] = load ptr, ptr @ecp, align 8
+; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr noundef nonnull dereferenceable(1) [[PTR]], i64 [[NONZERO]])
; CHECK-NEXT: ret i64 [[LEN]]
;
%nonzero = or i64 %n, 1
- %ptr = load i8*, i8** @ecp
- %len = call i64 @strnlen(i8* %ptr, i64 %nonzero)
+ %ptr = load ptr, ptr @ecp
+ %len = call i64 @strnlen(ptr %ptr, i64 %nonzero)
ret i64 %len
}
define i64 @noderef_strnlen_ecp_n(i64 %n) {
; CHECK-LABEL: @noderef_strnlen_ecp_n(
-; CHECK-NEXT: [[PTR:%.*]] = load i8*, i8** @ecp, align 8
-; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(i8* [[PTR]], i64 [[N:%.*]])
+; CHECK-NEXT: [[PTR:%.*]] = load ptr, ptr @ecp, align 8
+; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr [[PTR]], i64 [[N:%.*]])
; CHECK-NEXT: ret i64 [[LEN]]
;
- %ptr = load i8*, i8** @ecp
- %len = call i64 @strnlen(i8* %ptr, i64 %n)
+ %ptr = load ptr, ptr @ecp
+ %len = call i64 @strnlen(ptr %ptr, i64 %n)
ret i64 %len
}
@w = constant [2 x i8] c"w\00"
@null = constant [1 x i8] zeroinitializer
-declare i8* @strpbrk(i8*, i8*)
+declare ptr @strpbrk(ptr, ptr)
; Check strpbrk(s, "") -> NULL.
-define i8* @test_simplify1(i8* %str) {
+define ptr @test_simplify1(ptr %str) {
; CHECK-LABEL: @test_simplify1(
-; CHECK-NEXT: ret i8* null
+; CHECK-NEXT: ret ptr null
;
- %pat = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
- %ret = call i8* @strpbrk(i8* %str, i8* %pat)
- ret i8* %ret
+ %ret = call ptr @strpbrk(ptr %str, ptr @null)
+ ret ptr %ret
}
; Check strpbrk("", s) -> NULL.
-define i8* @test_simplify2(i8* %pat) {
+define ptr @test_simplify2(ptr %pat) {
; CHECK-LABEL: @test_simplify2(
-; CHECK-NEXT: ret i8* null
+; CHECK-NEXT: ret ptr null
;
- %str = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
- %ret = call i8* @strpbrk(i8* %str, i8* %pat)
- ret i8* %ret
+ %ret = call ptr @strpbrk(ptr @null, ptr %pat)
+ ret ptr %ret
}
; Check strpbrk(s1, s2), where s1 and s2 are constants.
-define i8* @test_simplify3() {
+define ptr @test_simplify3() {
; CHECK-LABEL: @test_simplify3(
-; CHECK-NEXT: ret i8* getelementptr inbounds ([12 x i8], [12 x i8]* @hello, i32 0, i32 6)
+; CHECK-NEXT: ret ptr getelementptr inbounds ([12 x i8], ptr @hello, i32 0, i32 6)
;
- %str = getelementptr [12 x i8], [12 x i8]* @hello, i32 0, i32 0
- %pat = getelementptr [2 x i8], [2 x i8]* @w, i32 0, i32 0
- %ret = call i8* @strpbrk(i8* %str, i8* %pat)
- ret i8* %ret
+ %ret = call ptr @strpbrk(ptr @hello, ptr @w)
+ ret ptr %ret
}
; Check strpbrk(s, "a") -> strchr(s, 'a').
-define i8* @test_simplify4(i8* %str) {
+define ptr @test_simplify4(ptr %str) {
; CHECK-LABEL: @test_simplify4(
-; CHECK-NEXT: [[STRCHR:%.*]] = call i8* @strchr(i8* noundef nonnull dereferenceable(1) [[STR:%.*]], i32 119)
-; CHECK-NEXT: ret i8* [[STRCHR]]
+; CHECK-NEXT: [[STRCHR:%.*]] = call ptr @strchr(ptr noundef nonnull dereferenceable(1) [[STR:%.*]], i32 119)
+; CHECK-NEXT: ret ptr [[STRCHR]]
;
- %pat = getelementptr [2 x i8], [2 x i8]* @w, i32 0, i32 0
- %ret = call i8* @strpbrk(i8* %str, i8* %pat)
- ret i8* %ret
+ %ret = call ptr @strpbrk(ptr %str, ptr @w)
+ ret ptr %ret
}
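; A C sketch of the single-character-set fold above (hypothetical helper
; name; illustrative only): with a one-character pattern, the first "break"
; match is simply the first occurrence of that character.
;
;   #include <string.h>
;   char *sketch_pbrk1(char *s) {
;     return strchr(s, 'w');   // strpbrk(s, "w") == strchr(s, 'w')
;   }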
; Check cases that shouldn't be simplified.
-define i8* @test_no_simplify1(i8* %str, i8* %pat) {
+define ptr @test_no_simplify1(ptr %str, ptr %pat) {
; CHECK-LABEL: @test_no_simplify1(
-; CHECK-NEXT: [[RET:%.*]] = call i8* @strpbrk(i8* [[STR:%.*]], i8* [[PAT:%.*]])
-; CHECK-NEXT: ret i8* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = call ptr @strpbrk(ptr [[STR:%.*]], ptr [[PAT:%.*]])
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ret = call i8* @strpbrk(i8* %str, i8* %pat)
- ret i8* %ret
+ %ret = call ptr @strpbrk(ptr %str, ptr %pat)
+ ret ptr %ret
}
@hello = constant [12 x i8] c"hello world\00"
@w = constant [2 x i8] c"w\00"
-declare i8 @strpbrk(i8*, i8*)
+declare i8 @strpbrk(ptr, ptr)
; Check that 'strpbrk' functions with the wrong prototype aren't simplified.
define i8 @test_no_simplify1() {
; CHECK-LABEL: @test_no_simplify1(
- %str = getelementptr [12 x i8], [12 x i8]* @hello, i32 0, i32 0
- %pat = getelementptr [2 x i8], [2 x i8]* @w, i32 0, i32 0
- %ret = call i8 @strpbrk(i8* %str, i8* %pat)
+ %ret = call i8 @strpbrk(ptr @hello, ptr @w)
; CHECK-NEXT: %ret = call i8 @strpbrk
ret i8 %ret
; CHECK-NEXT: ret i8 %ret
}
@hello = constant [14 x i8] c"hello world\5Cn\00"
@null = constant [1 x i8] zeroinitializer
-@chp = global i8* zeroinitializer
+@chp = global ptr zeroinitializer
-declare i8* @strrchr(i8*, i32)
+declare ptr @strrchr(ptr, i32)
define void @test_simplify1() {
; CHECK-LABEL: @test_simplify1(
-; CHECK-NEXT: store i8* getelementptr inbounds ([14 x i8], [14 x i8]* @hello, i32 0, i32 6), i8** @chp, align 4
+; CHECK-NEXT: store ptr getelementptr inbounds ([14 x i8], ptr @hello, i32 0, i32 6), ptr @chp, align 4
; CHECK-NEXT: ret void
;
- %str = getelementptr [14 x i8], [14 x i8]* @hello, i32 0, i32 0
- %dst = call i8* @strrchr(i8* %str, i32 119)
- store i8* %dst, i8** @chp
+ %dst = call ptr @strrchr(ptr @hello, i32 119)
+ store ptr %dst, ptr @chp
ret void
}
define void @test_simplify2() {
; CHECK-LABEL: @test_simplify2(
-; CHECK-NEXT: store i8* null, i8** @chp, align 4
+; CHECK-NEXT: store ptr null, ptr @chp, align 4
; CHECK-NEXT: ret void
;
- %str = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
- %dst = call i8* @strrchr(i8* %str, i32 119)
- store i8* %dst, i8** @chp
+ %dst = call ptr @strrchr(ptr @null, i32 119)
+ store ptr %dst, ptr @chp
ret void
}
define void @test_simplify3() {
; CHECK-LABEL: @test_simplify3(
-; CHECK-NEXT: store i8* getelementptr inbounds ([14 x i8], [14 x i8]* @hello, i32 0, i32 13), i8** @chp, align 4
+; CHECK-NEXT: store ptr getelementptr inbounds ([14 x i8], ptr @hello, i32 0, i32 13), ptr @chp, align 4
; CHECK-NEXT: ret void
;
- %src = getelementptr [14 x i8], [14 x i8]* @hello, i32 0, i32 0
- %dst = call i8* @strrchr(i8* %src, i32 0)
- store i8* %dst, i8** @chp
+ %dst = call ptr @strrchr(ptr @hello, i32 0)
+ store ptr %dst, ptr @chp
ret void
}
define void @test_simplify4() {
; CHECK-LABEL: @test_simplify4(
-; CHECK-NEXT: store i8* getelementptr inbounds ([14 x i8], [14 x i8]* @hello, i32 0, i32 13), i8** @chp, align 4
+; CHECK-NEXT: store ptr getelementptr inbounds ([14 x i8], ptr @hello, i32 0, i32 13), ptr @chp, align 4
; CHECK-NEXT: ret void
;
- %src = getelementptr [14 x i8], [14 x i8]* @hello, i32 0, i32 0
- %dst = call i8* @strrchr(i8* %src, i32 65280)
- store i8* %dst, i8** @chp
+ %dst = call ptr @strrchr(ptr @hello, i32 65280)
+ store ptr %dst, ptr @chp
ret void
}
define void @test_xform_to_memrchr(i32 %chr) {
; CHECK-LABEL: @test_xform_to_memrchr(
-; CHECK-NEXT: [[MEMRCHR:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(14) getelementptr inbounds ([14 x i8], [14 x i8]* @hello, i32 0, i32 0), i32 [[CHR:%.*]], i32 14)
-; CHECK-NEXT: store i8* [[MEMRCHR]], i8** @chp, align 4
+; CHECK-NEXT: [[MEMRCHR:%.*]] = call ptr @memrchr(ptr noundef nonnull dereferenceable(14) @hello, i32 [[CHR:%.*]], i32 14)
+; CHECK-NEXT: store ptr [[MEMRCHR]], ptr @chp, align 4
; CHECK-NEXT: ret void
;
- %src = getelementptr [14 x i8], [14 x i8]* @hello, i32 0, i32 0
- %dst = call i8* @strrchr(i8* %src, i32 %chr)
- store i8* %dst, i8** @chp
+ %dst = call ptr @strrchr(ptr @hello, i32 %chr)
+ store ptr %dst, ptr @chp
ret void
}
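; A C sketch of the memrchr transform above (hypothetical helper name;
; illustrative only; memrchr is a GNU extension): when the string length is
; known, the last occurrence of c, including a possible match on the
; terminating NUL, can be found by scanning the whole object backwards.
;
;   #define _GNU_SOURCE
;   #include <string.h>
;   static const char hello[14] = "hello world\\n";
;   char *sketch_rchr(int c) {
;     return memrchr(hello, c, sizeof hello);
;   }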
-define i8* @test1(i8* %str, i32 %c) {
+define ptr @test1(ptr %str, i32 %c) {
; CHECK-LABEL: @test1(
-; CHECK-NEXT: [[RET:%.*]] = call i8* @strrchr(i8* noundef nonnull dereferenceable(1) [[STR:%.*]], i32 [[C:%.*]])
-; CHECK-NEXT: ret i8* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = call ptr @strrchr(ptr noundef nonnull dereferenceable(1) [[STR:%.*]], i32 [[C:%.*]])
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ret = call i8* @strrchr(i8* %str, i32 %c)
- ret i8* %ret
+ %ret = call ptr @strrchr(ptr %str, i32 %c)
+ ret ptr %ret
}
-define i8* @test2(i8* %str, i32 %c) null_pointer_is_valid {
+define ptr @test2(ptr %str, i32 %c) null_pointer_is_valid {
; CHECK-LABEL: @test2(
-; CHECK-NEXT: [[RET:%.*]] = call i8* @strrchr(i8* noundef [[STR:%.*]], i32 [[C:%.*]])
-; CHECK-NEXT: ret i8* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = call ptr @strrchr(ptr noundef [[STR:%.*]], i32 [[C:%.*]])
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ret = call i8* @strrchr(i8* %str, i32 %c)
- ret i8* %ret
+ %ret = call ptr @strrchr(ptr %str, i32 %c)
+ ret ptr %ret
}
@hello = constant [14 x i8] c"hello world\5Cn\00"
@chr = global i8 zeroinitializer
-declare i8 @strrchr(i8*, i32)
+declare i8 @strrchr(ptr, i32)
define void @test_nosimplify1() {
; CHECK: test_nosimplify1
; CHECK: call i8 @strrchr
; CHECK: ret void
- %str = getelementptr [14 x i8], [14 x i8]* @hello, i32 0, i32 0
- %dst = call i8 @strrchr(i8* %str, i32 119)
- store i8 %dst, i8* @chr
+ %dst = call i8 @strrchr(ptr @hello, i32 119)
+ store i8 %dst, ptr @chr
ret void
}
@s10 = constant [11 x i8] c"0123456789\00"
-declare i8* @strrchr(i8*, i32)
+declare ptr @strrchr(ptr, i32)
; Fold strrchr(s + 10, c) to (unsigned char)c ? 0 : s + 10.
-define i8* @fold_strrchr_sp10_x(i32 %c) {
+define ptr @fold_strrchr_sp10_x(i32 %c) {
; CHECK-LABEL: @fold_strrchr_sp10_x(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT: [[MEMRCHR_CHAR0CMP:%.*]] = icmp eq i8 [[TMP1]], 0
-; CHECK-NEXT: [[MEMRCHR_SEL:%.*]] = select i1 [[MEMRCHR_CHAR0CMP]], i8* getelementptr inbounds ([11 x i8], [11 x i8]* @s10, i64 0, i64 10), i8* null
-; CHECK-NEXT: ret i8* [[MEMRCHR_SEL]]
+; CHECK-NEXT: [[MEMRCHR_SEL:%.*]] = select i1 [[MEMRCHR_CHAR0CMP]], ptr getelementptr inbounds ([11 x i8], ptr @s10, i64 0, i64 10), ptr null
+; CHECK-NEXT: ret ptr [[MEMRCHR_SEL]]
;
- %psp10 = getelementptr [11 x i8], [11 x i8]* @s10, i32 0, i32 10
- %pc = call i8* @strrchr(i8* %psp10, i32 %c)
- ret i8* %pc
+ %psp10 = getelementptr [11 x i8], ptr @s10, i32 0, i32 10
+ %pc = call ptr @strrchr(ptr %psp10, i32 %c)
+ ret ptr %pc
}
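; A C sketch of the fold above (hypothetical helper name; illustrative
; only): s + 10 points at the terminating NUL, so the only possible match
; is the NUL itself.
;
;   const char *sketch_sp10(const char *end, int c) {
;     // strrchr(end, c) with *end == '\0':
;     return (unsigned char)c == 0 ? end : 0;
;   }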
; Transform strrchr(s + 9, c) to [the equivalent of] memrchr(s + 9, c, 2).
-define i8* @call_strrchr_sp9_x(i32 %c) {
+define ptr @call_strrchr_sp9_x(i32 %c) {
; CHECK-LABEL: @call_strrchr_sp9_x(
-; CHECK-NEXT: [[MEMRCHR:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(2) getelementptr inbounds ([11 x i8], [11 x i8]* @s10, i64 0, i64 9), i32 [[C:%.*]], i64 2)
-; CHECK-NEXT: ret i8* [[MEMRCHR]]
+; CHECK-NEXT: [[MEMRCHR:%.*]] = call ptr @memrchr(ptr noundef nonnull dereferenceable(2) getelementptr inbounds ([11 x i8], ptr @s10, i64 0, i64 9), i32 [[C:%.*]], i64 2)
+; CHECK-NEXT: ret ptr [[MEMRCHR]]
;
- %psp9 = getelementptr [11 x i8], [11 x i8]* @s10, i32 0, i32 9
- %pc = call i8* @strrchr(i8* %psp9, i32 %c)
- ret i8* %pc
+ %psp9 = getelementptr [11 x i8], ptr @s10, i32 0, i32 9
+ %pc = call ptr @strrchr(ptr %psp9, i32 %c)
+ ret ptr %pc
}
; Do not transform strrchr(s + 2, c) (for short strings this could be
; folded into a chain of OR expressions ala D128011).
-define i8* @call_strrchr_sp2_x(i32 %c) {
+define ptr @call_strrchr_sp2_x(i32 %c) {
; CHECK-LABEL: @call_strrchr_sp2_x(
-; CHECK-NEXT: [[MEMRCHR:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(9) getelementptr inbounds ([11 x i8], [11 x i8]* @s10, i64 0, i64 2), i32 [[C:%.*]], i64 9)
-; CHECK-NEXT: ret i8* [[MEMRCHR]]
+; CHECK-NEXT: [[MEMRCHR:%.*]] = call ptr @memrchr(ptr noundef nonnull dereferenceable(9) getelementptr inbounds ([11 x i8], ptr @s10, i64 0, i64 2), i32 [[C:%.*]], i64 9)
+; CHECK-NEXT: ret ptr [[MEMRCHR]]
;
- %psp2 = getelementptr [11 x i8], [11 x i8]* @s10, i32 0, i32 2
- %pc = call i8* @strrchr(i8* %psp2, i32 %c)
- ret i8* %pc
+ %psp2 = getelementptr [11 x i8], ptr @s10, i32 0, i32 2
+ %pc = call ptr @strrchr(ptr %psp2, i32 %c)
+ ret ptr %pc
}
; Do not transform strrchr(s + 1, c).
-define i8* @call_strrchr_sp1_x(i32 %c) {
+define ptr @call_strrchr_sp1_x(i32 %c) {
; CHECK-LABEL: @call_strrchr_sp1_x(
-; CHECK-NEXT: [[MEMRCHR:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(10) getelementptr inbounds ([11 x i8], [11 x i8]* @s10, i64 0, i64 1), i32 [[C:%.*]], i64 10)
-; CHECK-NEXT: ret i8* [[MEMRCHR]]
+; CHECK-NEXT: [[MEMRCHR:%.*]] = call ptr @memrchr(ptr noundef nonnull dereferenceable(10) getelementptr inbounds ([11 x i8], ptr @s10, i64 0, i64 1), i32 [[C:%.*]], i64 10)
+; CHECK-NEXT: ret ptr [[MEMRCHR]]
;
- %psp1 = getelementptr [11 x i8], [11 x i8]* @s10, i32 0, i32 1
- %pc = call i8* @strrchr(i8* %psp1, i32 %c)
- ret i8* %pc
+ %psp1 = getelementptr [11 x i8], ptr @s10, i32 0, i32 1
+ %pc = call ptr @strrchr(ptr %psp1, i32 %c)
+ ret ptr %pc
}
@abc = constant [4 x i8] c"abc\00"
@null = constant [1 x i8] zeroinitializer
-declare i64 @strspn(i8*, i8*)
+declare i64 @strspn(ptr, ptr)
; Check strspn(s, "") -> 0.
-define i64 @test_simplify1(i8* %str) {
+define i64 @test_simplify1(ptr %str) {
; CHECK-LABEL: @test_simplify1(
- %pat = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
- %ret = call i64 @strspn(i8* %str, i8* %pat)
+ %ret = call i64 @strspn(ptr %str, ptr @null)
ret i64 %ret
; CHECK-NEXT: ret i64 0
}
; Check strspn("", s) -> 0.
-define i64 @test_simplify2(i8* %pat) {
+define i64 @test_simplify2(ptr %pat) {
; CHECK-LABEL: @test_simplify2(
- %str = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
- %ret = call i64 @strspn(i8* %str, i8* %pat)
+ %ret = call i64 @strspn(ptr @null, ptr %pat)
ret i64 %ret
; CHECK-NEXT: ret i64 0
}
define i64 @test_simplify3() {
; CHECK-LABEL: @test_simplify3(
- %str = getelementptr [6 x i8], [6 x i8]* @abcba, i32 0, i32 0
- %pat = getelementptr [4 x i8], [4 x i8]* @abc, i32 0, i32 0
- %ret = call i64 @strspn(i8* %str, i8* %pat)
+ %ret = call i64 @strspn(ptr @abcba, ptr @abc)
ret i64 %ret
; CHECK-NEXT: ret i64 5
}
; Check cases that shouldn't be simplified.
-define i64 @test_no_simplify1(i8* %str, i8* %pat) {
+define i64 @test_no_simplify1(ptr %str, ptr %pat) {
; CHECK-LABEL: @test_no_simplify1(
- %ret = call i64 @strspn(i8* %str, i8* %pat)
-; CHECK-NEXT: %ret = call i64 @strspn(i8* %str, i8* %pat)
+ %ret = call i64 @strspn(ptr %str, ptr %pat)
+; CHECK-NEXT: %ret = call i64 @strspn(ptr %str, ptr %pat)
ret i64 %ret
; CHECK-NEXT: ret i64 %ret
}
@.str2 = private constant [6 x i8] c"abcde\00"
@.str3 = private constant [4 x i8] c"bcd\00"
-declare i8* @strstr(i8*, i8*)
+declare ptr @strstr(ptr, ptr)
; Check strstr(str, "") -> str.
-define i8* @test_simplify1(i8* %str) {
+define ptr @test_simplify1(ptr %str) {
; CHECK-LABEL: @test_simplify1(
-; CHECK-NEXT: ret i8* [[STR:%.*]]
+; CHECK-NEXT: ret ptr [[STR:%.*]]
;
- %pat = getelementptr inbounds [1 x i8], [1 x i8]* @.str, i32 0, i32 0
- %ret = call i8* @strstr(i8* %str, i8* %pat)
- ret i8* %ret
+ %ret = call ptr @strstr(ptr %str, ptr @.str)
+ ret ptr %ret
}
; Check strstr(str, "a") -> strchr(str, 'a').
-define i8* @test_simplify2(i8* %str) {
+define ptr @test_simplify2(ptr %str) {
; CHECK-LABEL: @test_simplify2(
-; CHECK-NEXT: [[STRCHR:%.*]] = call i8* @strchr(i8* noundef nonnull dereferenceable(1) [[STR:%.*]], i32 97)
-; CHECK-NEXT: ret i8* [[STRCHR]]
+; CHECK-NEXT: [[STRCHR:%.*]] = call ptr @strchr(ptr noundef nonnull dereferenceable(1) [[STR:%.*]], i32 97)
+; CHECK-NEXT: ret ptr [[STRCHR]]
;
- %pat = getelementptr inbounds [2 x i8], [2 x i8]* @.str1, i32 0, i32 0
- %ret = call i8* @strstr(i8* %str, i8* %pat)
- ret i8* %ret
+ %ret = call ptr @strstr(ptr %str, ptr @.str1)
+ ret ptr %ret
}
; Check strstr("abcde", "bcd") -> "abcde" + 1.
-define i8* @test_simplify3() {
+define ptr @test_simplify3() {
; CHECK-LABEL: @test_simplify3(
-; CHECK-NEXT: ret i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str2, i64 0, i64 1)
+; CHECK-NEXT: ret ptr getelementptr inbounds ([6 x i8], ptr @.str2, i64 0, i64 1)
;
- %str = getelementptr inbounds [6 x i8], [6 x i8]* @.str2, i32 0, i32 0
- %pat = getelementptr inbounds [4 x i8], [4 x i8]* @.str3, i32 0, i32 0
- %ret = call i8* @strstr(i8* %str, i8* %pat)
- ret i8* %ret
+ %ret = call ptr @strstr(ptr @.str2, ptr @.str3)
+ ret ptr %ret
}
; Check strstr(str, str) -> str.
-define i8* @test_simplify4(i8* %str) {
+define ptr @test_simplify4(ptr %str) {
; CHECK-LABEL: @test_simplify4(
-; CHECK-NEXT: ret i8* [[STR:%.*]]
+; CHECK-NEXT: ret ptr [[STR:%.*]]
;
- %ret = call i8* @strstr(i8* %str, i8* %str)
- ret i8* %ret
+ %ret = call ptr @strstr(ptr %str, ptr %str)
+ ret ptr %ret
}
; Check strstr(str, pat) == str -> strncmp(str, pat, strlen(pat)) == 0.
-define i1 @test_simplify5(i8* %str, i8* %pat) {
+define i1 @test_simplify5(ptr %str, ptr %pat) {
; CHECK-LABEL: @test_simplify5(
-; CHECK-NEXT: [[STRLEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) [[PAT:%.*]])
-; CHECK-NEXT: [[STRNCMP:%.*]] = call i32 @strncmp(i8* [[STR:%.*]], i8* [[PAT]], i64 [[STRLEN]])
+; CHECK-NEXT: [[STRLEN:%.*]] = call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[PAT:%.*]])
+; CHECK-NEXT: [[STRNCMP:%.*]] = call i32 @strncmp(ptr [[STR:%.*]], ptr [[PAT]], i64 [[STRLEN]])
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[STRNCMP]], 0
; CHECK-NEXT: ret i1 [[CMP1]]
;
- %ret = call i8* @strstr(i8* %str, i8* %pat)
- %cmp = icmp eq i8* %ret, %str
+ %ret = call ptr @strstr(ptr %str, ptr %pat)
+ %cmp = icmp eq ptr %ret, %str
ret i1 %cmp
}
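; A C sketch of the comparison fold above (hypothetical helper name;
; illustrative only): strstr returns its first argument exactly when pat is
; a prefix of str.
;
;   #include <string.h>
;   int sketch_is_prefix(const char *str, const char *pat) {
;     return strncmp(str, pat, strlen(pat)) == 0;
;   }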
-define i8* @test1(i8* %str1, i8* %str2) {
+define ptr @test1(ptr %str1, ptr %str2) {
; CHECK-LABEL: @test1(
-; CHECK-NEXT: [[RET:%.*]] = call i8* @strstr(i8* noundef nonnull dereferenceable(1) [[STR1:%.*]], i8* noundef nonnull dereferenceable(1) [[STR2:%.*]])
-; CHECK-NEXT: ret i8* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = call ptr @strstr(ptr noundef nonnull dereferenceable(1) [[STR1:%.*]], ptr noundef nonnull dereferenceable(1) [[STR2:%.*]])
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ret = call i8* @strstr(i8* %str1, i8* %str2)
- ret i8* %ret
+ %ret = call ptr @strstr(ptr %str1, ptr %str2)
+ ret ptr %ret
}
-define i8* @test2(i8* %str1, i8* %str2) null_pointer_is_valid {
+define ptr @test2(ptr %str1, ptr %str2) null_pointer_is_valid {
; CHECK-LABEL: @test2(
-; CHECK-NEXT: [[RET:%.*]] = call i8* @strstr(i8* noundef [[STR1:%.*]], i8* noundef [[STR2:%.*]])
-; CHECK-NEXT: ret i8* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = call ptr @strstr(ptr noundef [[STR1:%.*]], ptr noundef [[STR2:%.*]])
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ret = call i8* @strstr(i8* %str1, i8* %str2)
- ret i8* %ret
+ %ret = call ptr @strstr(ptr %str1, ptr %str2)
+ ret ptr %ret
}
@null = private constant [1 x i8] zeroinitializer
-declare i8 @strstr(i8*, i8*)
+declare i8 @strstr(ptr, ptr)
-define i8 @test_no_simplify1(i8* %str) {
+define i8 @test_no_simplify1(ptr %str) {
; CHECK-LABEL: @test_no_simplify1(
- %pat = getelementptr inbounds [1 x i8], [1 x i8]* @null, i32 0, i32 0
- %ret = call i8 @strstr(i8* %str, i8* %pat)
+ %ret = call i8 @strstr(ptr %str, ptr @null)
; CHECK-NEXT: call i8 @strstr
ret i8 %ret
; CHECK-NEXT: ret i8 %ret
}
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-declare i32 @strtol(i8* %s, i8** %endptr, i32 %base)
-; CHECK: declare i32 @strtol(i8* readonly, i8** nocapture, i32)
+declare i32 @strtol(ptr %s, ptr %endptr, i32 %base)
+; CHECK: declare i32 @strtol(ptr readonly, ptr nocapture, i32)
-declare double @strtod(i8* %s, i8** %endptr)
-; CHECK: declare double @strtod(i8* readonly, i8** nocapture)
+declare double @strtod(ptr %s, ptr %endptr)
+; CHECK: declare double @strtod(ptr readonly, ptr nocapture)
-declare float @strtof(i8* %s, i8** %endptr)
-; CHECK: declare float @strtof(i8* readonly, i8** nocapture)
+declare float @strtof(ptr %s, ptr %endptr)
+; CHECK: declare float @strtof(ptr readonly, ptr nocapture)
-declare i64 @strtoul(i8* %s, i8** %endptr, i32 %base)
-; CHECK: declare i64 @strtoul(i8* readonly, i8** nocapture, i32)
+declare i64 @strtoul(ptr %s, ptr %endptr, i32 %base)
+; CHECK: declare i64 @strtoul(ptr readonly, ptr nocapture, i32)
-declare i64 @strtoll(i8* %s, i8** %endptr, i32 %base)
-; CHECK: declare i64 @strtoll(i8* readonly, i8** nocapture, i32)
+declare i64 @strtoll(ptr %s, ptr %endptr, i32 %base)
+; CHECK: declare i64 @strtoll(ptr readonly, ptr nocapture, i32)
-declare double @strtold(i8* %s, i8** %endptr)
-; CHECK: declare double @strtold(i8* readonly, i8** nocapture)
+declare double @strtold(ptr %s, ptr %endptr)
+; CHECK: declare double @strtold(ptr readonly, ptr nocapture)
-declare i64 @strtoull(i8* %s, i8** %endptr, i32 %base)
-; CHECK: declare i64 @strtoull(i8* readonly, i8** nocapture, i32)
+declare i64 @strtoull(ptr %s, ptr %endptr, i32 %base)
+; CHECK: declare i64 @strtoull(ptr readonly, ptr nocapture, i32)
-define void @test_simplify1(i8* %x, i8** %endptr) {
+define void @test_simplify1(ptr %x, ptr %endptr) {
; CHECK-LABEL: @test_simplify1(
- call i32 @strtol(i8* %x, i8** null, i32 10)
-; CHECK-NEXT: call i32 @strtol(i8* nocapture %x, i8** null, i32 10)
+ call i32 @strtol(ptr %x, ptr null, i32 10)
+; CHECK-NEXT: call i32 @strtol(ptr nocapture %x, ptr null, i32 10)
ret void
}
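; Rationale for the nocapture annotation above, sketched in C (hypothetical
; helper name; illustrative only): with a null endptr, strtol only reads
; the string and has nowhere to store the pointer.
;
;   #include <stdlib.h>
;   long sketch_strtol(const char *x) {
;     return strtol(x, NULL, 10);   // x is read but never captured
;   }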
-define void @test_simplify2(i8* %x, i8** %endptr) {
+define void @test_simplify2(ptr %x, ptr %endptr) {
; CHECK-LABEL: @test_simplify2(
- call double @strtod(i8* %x, i8** null)
-; CHECK-NEXT: call double @strtod(i8* nocapture %x, i8** null)
+ call double @strtod(ptr %x, ptr null)
+; CHECK-NEXT: call double @strtod(ptr nocapture %x, ptr null)
ret void
}
-define void @test_simplify3(i8* %x, i8** %endptr) {
+define void @test_simplify3(ptr %x, ptr %endptr) {
; CHECK-LABEL: @test_simplify3(
- call float @strtof(i8* %x, i8** null)
-; CHECK-NEXT: call float @strtof(i8* nocapture %x, i8** null)
+ call float @strtof(ptr %x, ptr null)
+; CHECK-NEXT: call float @strtof(ptr nocapture %x, ptr null)
ret void
}
-define void @test_simplify4(i8* %x, i8** %endptr) {
+define void @test_simplify4(ptr %x, ptr %endptr) {
; CHECK-LABEL: @test_simplify4(
- call i64 @strtoul(i8* %x, i8** null, i32 10)
-; CHECK-NEXT: call i64 @strtoul(i8* nocapture %x, i8** null, i32 10)
+ call i64 @strtoul(ptr %x, ptr null, i32 10)
+; CHECK-NEXT: call i64 @strtoul(ptr nocapture %x, ptr null, i32 10)
ret void
}
-define void @test_simplify5(i8* %x, i8** %endptr) {
+define void @test_simplify5(ptr %x, ptr %endptr) {
; CHECK-LABEL: @test_simplify5(
- call i64 @strtoll(i8* %x, i8** null, i32 10)
-; CHECK-NEXT: call i64 @strtoll(i8* nocapture %x, i8** null, i32 10)
+ call i64 @strtoll(ptr %x, ptr null, i32 10)
+; CHECK-NEXT: call i64 @strtoll(ptr nocapture %x, ptr null, i32 10)
ret void
}
-define void @test_simplify6(i8* %x, i8** %endptr) {
+define void @test_simplify6(ptr %x, ptr %endptr) {
; CHECK-LABEL: @test_simplify6(
- call double @strtold(i8* %x, i8** null)
-; CHECK-NEXT: call double @strtold(i8* nocapture %x, i8** null)
+ call double @strtold(ptr %x, ptr null)
+; CHECK-NEXT: call double @strtold(ptr nocapture %x, ptr null)
ret void
}
-define void @test_simplify7(i8* %x, i8** %endptr) {
+define void @test_simplify7(ptr %x, ptr %endptr) {
; CHECK-LABEL: @test_simplify7(
- call i64 @strtoull(i8* %x, i8** null, i32 10)
-; CHECK-NEXT: call i64 @strtoull(i8* nocapture %x, i8** null, i32 10)
+ call i64 @strtoull(ptr %x, ptr null, i32 10)
+; CHECK-NEXT: call i64 @strtoull(ptr nocapture %x, ptr null, i32 10)
ret void
}
-define void @test_no_simplify1(i8* %x, i8** %endptr) {
+define void @test_no_simplify1(ptr %x, ptr %endptr) {
; CHECK-LABEL: @test_no_simplify1(
- call i32 @strtol(i8* %x, i8** %endptr, i32 10)
-; CHECK-NEXT: call i32 @strtol(i8* %x, i8** %endptr, i32 10)
+ call i32 @strtol(ptr %x, ptr %endptr, i32 10)
+; CHECK-NEXT: call i32 @strtol(ptr %x, ptr %endptr, i32 10)
ret void
}
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
%A = type { float }
-define void @test1(%A* %a1, %A* %a2) {
+define void @test1(ptr %a1, ptr %a2) {
entry:
; CHECK-LABEL: @test1
; CHECK: %[[LOAD:.*]] = load i32, {{.*}}, !tbaa [[TAG_A:!.*]]
; CHECK: store i32 %[[LOAD]], {{.*}}, !tbaa [[TAG_A]]
; CHECK: ret
- %0 = bitcast %A* %a1 to i8*
- %1 = bitcast %A* %a2 to i8*
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 4, i1 false), !tbaa !4 ; TAG_A
+ tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 %a1, ptr align 4 %a2, i64 4, i1 false), !tbaa !4 ; TAG_A
ret void
}
-%B = type { i32 (i8*, i32*, double*)** }
+%B = type { ptr }
-define i32 (i8*, i32*, double*)*** @test2() {
+define ptr @test2() {
; CHECK-LABEL: @test2
; CHECK-NOT: memcpy
; CHECK: ret
%tmp = alloca %B, align 8
- %tmp1 = bitcast %B* %tmp to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %tmp1, i8* align 8 undef, i64 8, i1 false), !tbaa !7 ; TAG_B
- %tmp2 = getelementptr %B, %B* %tmp, i32 0, i32 0
- %tmp3 = load i32 (i8*, i32*, double*)**, i32 (i8*, i32*, double*)*** %tmp2
- ret i32 (i8*, i32*, double*)*** %tmp2
+ call void @llvm.memcpy.p0.p0.i64(ptr align 8 %tmp, ptr align 8 undef, i64 8, i1 false), !tbaa !7 ; TAG_B
+ %tmp3 = load ptr, ptr %tmp
+ ret ptr %tmp
}
!0 = !{!"root"}
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
; Verify that instcombine preserves TBAA tags when converting a memcpy into
; a scalar load and store.
%struct.test1 = type { float }
; CHECK: @test
-; CHECK: %[[LOAD:.*]] = load i32, i32* %{{.*}}, align 4, !tbaa !0
-; CHECK: store i32 %[[LOAD:.*]], i32* %{{.*}}, align 4, !tbaa !0
+; CHECK: %[[LOAD:.*]] = load i32, ptr %{{.*}}, align 4, !tbaa !0
+; CHECK: store i32 %[[LOAD:.*]], ptr %{{.*}}, align 4, !tbaa !0
; CHECK: ret
-define void @test1(%struct.test1* nocapture %a, %struct.test1* nocapture %b) {
+define void @test1(ptr nocapture %a, ptr nocapture %b) {
entry:
- %0 = bitcast %struct.test1* %a to i8*
- %1 = bitcast %struct.test1* %b to i8*
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 4, i1 false), !tbaa.struct !3
+ tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 %a, ptr align 4 %b, i64 4, i1 false), !tbaa.struct !3
ret void
}
-%struct.test2 = type { i32 (i8*, i32*, double*)** }
+%struct.test2 = type { ptr }
-define i32 (i8*, i32*, double*)*** @test2() {
+define ptr @test2() {
; CHECK-LABEL: @test2(
; CHECK-NOT: memcpy
; CHECK: ret
%tmp = alloca %struct.test2, align 8
- %tmp1 = bitcast %struct.test2* %tmp to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %tmp1, i8* align 8 undef, i64 8, i1 false), !tbaa.struct !4
- %tmp2 = getelementptr %struct.test2, %struct.test2* %tmp, i32 0, i32 0
- %tmp3 = load i32 (i8*, i32*, double*)**, i32 (i8*, i32*, double*)*** %tmp2
- ret i32 (i8*, i32*, double*)*** %tmp2
+ call void @llvm.memcpy.p0.p0.i64(ptr align 8 %tmp, ptr align 8 undef, i64 8, i1 false), !tbaa.struct !4
+ %tmp3 = load ptr, ptr %tmp
+ ret ptr %tmp
}
; CHECK: !0 = !{!1, !1, i64 0}
; Extra uses
-define i32 @sub_ashr_and_i32_extra_use_sub(i32 %x, i32 %y, i32* %p) {
+define i32 @sub_ashr_and_i32_extra_use_sub(i32 %x, i32 %y, ptr %p) {
; CHECK-LABEL: @sub_ashr_and_i32_extra_use_sub(
; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT: store i32 [[SUB]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[SUB]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[ISNEG:%.*]] = icmp slt i32 [[SUB]], 0
; CHECK-NEXT: [[AND:%.*]] = select i1 [[ISNEG]], i32 [[X]], i32 0
; CHECK-NEXT: ret i32 [[AND]]
;
%sub = sub nsw i32 %y, %x
- store i32 %sub, i32* %p
+ store i32 %sub, ptr %p
%shr = ashr i32 %sub, 31
%and = and i32 %shr, %x
ret i32 %and
}
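; The identity exercised by these tests, as a C sketch (hypothetical helper
; name; illustrative only): with no signed overflow, (y - x) >> 31 is
; all-ones exactly when y < x, so masking x with it acts as a select.
;
;   #include <stdint.h>
;   int32_t sketch_ashr_and(int32_t x, int32_t y) {
;     // ((y - x) >> 31) & x, assuming y - x does not overflow (nsw):
;     return y < x ? x : 0;
;   }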
-define i32 @sub_ashr_and_i32_extra_use_and(i32 %x, i32 %y, i32* %p) {
+define i32 @sub_ashr_and_i32_extra_use_and(i32 %x, i32 %y, ptr %p) {
; CHECK-LABEL: @sub_ashr_and_i32_extra_use_and(
; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[AND:%.*]] = select i1 [[TMP1]], i32 [[X]], i32 0
-; CHECK-NEXT: store i32 [[AND]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[AND]], ptr [[P:%.*]], align 4
; CHECK-NEXT: ret i32 [[AND]]
;
%sub = sub nsw i32 %y, %x
%shr = ashr i32 %sub, 31
%and = and i32 %shr, %x
- store i32 %and, i32* %p
+ store i32 %and, ptr %p
ret i32 %and
}
; Negative Tests
-define i32 @sub_ashr_and_i32_extra_use_ashr(i32 %x, i32 %y, i32* %p) {
+define i32 @sub_ashr_and_i32_extra_use_ashr(i32 %x, i32 %y, ptr %p) {
; CHECK-LABEL: @sub_ashr_and_i32_extra_use_ashr(
; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[SHR:%.*]] = sext i1 [[TMP1]] to i32
-; CHECK-NEXT: store i32 [[SHR]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[SHR]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHR]], [[X]]
; CHECK-NEXT: ret i32 [[AND]]
;
%sub = sub nsw i32 %y, %x
%shr = ashr i32 %sub, 31
- store i32 %shr, i32* %p
+ store i32 %shr, ptr %p
%and = and i32 %shr, %x
ret i32 %and
}
; Extra uses
-define i32 @sub_ashr_or_i32_extra_use_sub(i32 %x, i32 %y, i32* %p) {
+define i32 @sub_ashr_or_i32_extra_use_sub(i32 %x, i32 %y, ptr %p) {
; CHECK-LABEL: @sub_ashr_or_i32_extra_use_sub(
; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT: store i32 [[SUB]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[SUB]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i32 [[Y]], [[X]]
; CHECK-NEXT: [[OR:%.*]] = select i1 [[TMP1]], i32 -1, i32 [[X]]
; CHECK-NEXT: ret i32 [[OR]]
;
%sub = sub nsw i32 %y, %x
- store i32 %sub, i32* %p
+ store i32 %sub, ptr %p
%shr = ashr i32 %sub, 31
%or = or i32 %shr, %x
ret i32 %or
}
-define i32 @sub_ashr_or_i32_extra_use_or(i32 %x, i32 %y, i32* %p) {
+define i32 @sub_ashr_or_i32_extra_use_or(i32 %x, i32 %y, ptr %p) {
; CHECK-LABEL: @sub_ashr_or_i32_extra_use_or(
; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[OR:%.*]] = select i1 [[TMP1]], i32 -1, i32 [[X]]
-; CHECK-NEXT: store i32 [[OR]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[OR]], ptr [[P:%.*]], align 4
; CHECK-NEXT: ret i32 [[OR]]
;
%sub = sub nsw i32 %y, %x
%shr = ashr i32 %sub, 31
%or = or i32 %shr, %x
- store i32 %or, i32* %p
+ store i32 %or, ptr %p
ret i32 %or
}
-define i32 @neg_extra_use_or_ashr_i32(i32 %x, i32* %p) {
+define i32 @neg_extra_use_or_ashr_i32(i32 %x, ptr %p) {
; CHECK-LABEL: @neg_extra_use_or_ashr_i32(
; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[X:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i32 [[X]], 0
; CHECK-NEXT: [[SHR:%.*]] = sext i1 [[TMP1]] to i32
-; CHECK-NEXT: store i32 [[NEG]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[NEG]], ptr [[P:%.*]], align 4
; CHECK-NEXT: ret i32 [[SHR]]
;
%neg = sub i32 0, %x
%or = or i32 %neg, %x
%shr = ashr i32 %or, 31
- store i32 %neg, i32* %p
+ store i32 %neg, ptr %p
ret i32 %shr
}
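; The underlying identity, sketched in C (hypothetical helper name;
; illustrative only): x | -x has its sign bit set for every nonzero x, so
; the arithmetic shift by 31 yields a nonzero test.
;
;   #include <stdint.h>
;   int32_t sketch_neg_or(int32_t x) {
;     // ((-x | x) >> 31) == (x != 0 ? -1 : 0)
;     return x != 0 ? -1 : 0;
;   }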
; Negative Tests
-define i32 @sub_ashr_or_i32_extra_use_ashr(i32 %x, i32 %y, i32* %p) {
+define i32 @sub_ashr_or_i32_extra_use_ashr(i32 %x, i32 %y, ptr %p) {
; CHECK-LABEL: @sub_ashr_or_i32_extra_use_ashr(
; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[SHR:%.*]] = sext i1 [[TMP1]] to i32
-; CHECK-NEXT: store i32 [[SHR]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[SHR]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[X]]
; CHECK-NEXT: ret i32 [[OR]]
;
%sub = sub nsw i32 %y, %x
%shr = ashr i32 %sub, 31
- store i32 %shr, i32* %p
+ store i32 %shr, ptr %p
%or = or i32 %shr, %x
ret i32 %or
}
ret i32 %or
}
-define i32 @neg_or_extra_use_ashr_i32(i32 %x, i32* %p) {
+define i32 @neg_or_extra_use_ashr_i32(i32 %x, ptr %p) {
; CHECK-LABEL: @neg_or_extra_use_ashr_i32(
; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[X:%.*]]
; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X]]
; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[OR]], 31
-; CHECK-NEXT: store i32 [[OR]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[OR]], ptr [[P:%.*]], align 4
; CHECK-NEXT: ret i32 [[SHR]]
;
%neg = sub i32 0, %x
%or = or i32 %neg, %x
%shr = ashr i32 %or, 31
- store i32 %or, i32* %p
+ store i32 %or, ptr %p
ret i32 %shr
}
; PR49870
@g0 = external global i8, align 1
@g1 = external global i8, align 1
-define i32 @constantexpr0(i32 %x, i8* %y) unnamed_addr {
+define i32 @constantexpr0(i32 %x, ptr %y) unnamed_addr {
; CHECK-LABEL: @constantexpr0(
-; CHECK-NEXT: [[I0:%.*]] = add i32 [[X:%.*]], ptrtoint (i8* @g0 to i32)
+; CHECK-NEXT: [[I0:%.*]] = add i32 [[X:%.*]], ptrtoint (ptr @g0 to i32)
; CHECK-NEXT: [[R:%.*]] = sub i32 0, [[I0]]
; CHECK-NEXT: ret i32 [[R]]
;
- %i0 = add i32 %x, ptrtoint (i8* @g0 to i32)
+ %i0 = add i32 %x, ptrtoint (ptr @g0 to i32)
%r = sub i32 0, %i0
ret i32 %r
}
-define i32 @constantexpr1(i32 %x, i8* %y) unnamed_addr {
+define i32 @constantexpr1(i32 %x, ptr %y) unnamed_addr {
; CHECK-LABEL: @constantexpr1(
; CHECK-NEXT: [[I0:%.*]] = add i32 [[X:%.*]], 42
-; CHECK-NEXT: [[R:%.*]] = sub i32 ptrtoint (i8* @g1 to i32), [[I0]]
+; CHECK-NEXT: [[R:%.*]] = sub i32 ptrtoint (ptr @g1 to i32), [[I0]]
; CHECK-NEXT: ret i32 [[R]]
;
%i0 = add i32 %x, 42
- %r = sub i32 ptrtoint (i8* @g1 to i32), %i0
+ %r = sub i32 ptrtoint (ptr @g1 to i32), %i0
ret i32 %r
}
-define i32 @constantexpr2(i32 %x, i8* %y) unnamed_addr {
+define i32 @constantexpr2(i32 %x, ptr %y) unnamed_addr {
; CHECK-LABEL: @constantexpr2(
-; CHECK-NEXT: [[I0:%.*]] = add i32 [[X:%.*]], ptrtoint (i8* @g0 to i32)
-; CHECK-NEXT: [[R:%.*]] = sub i32 ptrtoint (i8* @g1 to i32), [[I0]]
+; CHECK-NEXT: [[I0:%.*]] = add i32 [[X:%.*]], ptrtoint (ptr @g0 to i32)
+; CHECK-NEXT: [[R:%.*]] = sub i32 ptrtoint (ptr @g1 to i32), [[I0]]
; CHECK-NEXT: ret i32 [[R]]
;
- %i0 = add i32 %x, ptrtoint (i8* @g0 to i32)
- %r = sub i32 ptrtoint (i8* @g1 to i32), %i0
+ %i0 = add i32 %x, ptrtoint (ptr @g0 to i32)
+ %r = sub i32 ptrtoint (ptr @g1 to i32), %i0
ret i32 %r
}
define i64 @pr49870(i64 %x) {
; CHECK-LABEL: @pr49870(
; CHECK-NEXT: [[I0:%.*]] = xor i64 [[X:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = add i64 [[I0]], ptrtoint (i8* @g0 to i64)
+; CHECK-NEXT: [[R:%.*]] = add i64 [[I0]], ptrtoint (ptr @g0 to i64)
; CHECK-NEXT: ret i64 [[R]]
;
%i0 = xor i64 %x, -1
- %r = add i64 %i0, ptrtoint (i8* @g0 to i64)
+ %r = add i64 %i0, ptrtoint (ptr @g0 to i64)
ret i64 %r
}
target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-define i64 @test_inbounds([0 x i32]* %base, i64 %idx) {
+define i64 @test_inbounds(ptr %base, i64 %idx) {
; CHECK-LABEL: @test_inbounds(
; CHECK-NEXT: [[P2_IDX:%.*]] = shl nsw i64 [[IDX:%.*]], 2
; CHECK-NEXT: ret i64 [[P2_IDX]]
;
- %p1 = getelementptr inbounds [0 x i32], [0 x i32]* %base, i64 0, i64 0
- %p2 = getelementptr inbounds [0 x i32], [0 x i32]* %base, i64 0, i64 %idx
- %i1 = ptrtoint i32* %p1 to i64
- %i2 = ptrtoint i32* %p2 to i64
+ %p2 = getelementptr inbounds [0 x i32], ptr %base, i64 0, i64 %idx
+ %i1 = ptrtoint ptr %base to i64
+ %i2 = ptrtoint ptr %p2 to i64
%d = sub i64 %i2, %i1
ret i64 %d
}
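; The arithmetic behind the fold above, as a C sketch (hypothetical helper
; name; illustrative only): on this datalayout an i32 is 4 bytes, so the
; pointer difference is just the scaled index.
;
;   #include <stdint.h>
;   int64_t sketch_gep_diff(int32_t *base, int64_t idx) {
;     // &base[idx] - &base[0], in bytes, folds to idx << 2
;     return (intptr_t)(base + idx) - (intptr_t)base;
;   }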
-define i64 @test_partial_inbounds1([0 x i32]* %base, i64 %idx) {
+define i64 @test_partial_inbounds1(ptr %base, i64 %idx) {
; CHECK-LABEL: @test_partial_inbounds1(
; CHECK-NEXT: [[P2_IDX:%.*]] = shl i64 [[IDX:%.*]], 2
; CHECK-NEXT: ret i64 [[P2_IDX]]
;
- %p1 = getelementptr inbounds [0 x i32], [0 x i32]* %base, i64 0, i64 0
- %p2 = getelementptr [0 x i32], [0 x i32]* %base, i64 0, i64 %idx
- %i1 = ptrtoint i32* %p1 to i64
- %i2 = ptrtoint i32* %p2 to i64
+ %p2 = getelementptr [0 x i32], ptr %base, i64 0, i64 %idx
+ %i1 = ptrtoint ptr %base to i64
+ %i2 = ptrtoint ptr %p2 to i64
%d = sub i64 %i2, %i1
ret i64 %d
}
-define i64 @test_partial_inbounds2([0 x i32]* %base, i64 %idx) {
+define i64 @test_partial_inbounds2(ptr %base, i64 %idx) {
; CHECK-LABEL: @test_partial_inbounds2(
; CHECK-NEXT: [[P2_IDX:%.*]] = shl nsw i64 [[IDX:%.*]], 2
; CHECK-NEXT: ret i64 [[P2_IDX]]
;
- %p1 = getelementptr [0 x i32], [0 x i32]* %base, i64 0, i64 0
- %p2 = getelementptr inbounds [0 x i32], [0 x i32]* %base, i64 0, i64 %idx
- %i1 = ptrtoint i32* %p1 to i64
- %i2 = ptrtoint i32* %p2 to i64
+ %p2 = getelementptr inbounds [0 x i32], ptr %base, i64 0, i64 %idx
+ %i1 = ptrtoint ptr %base to i64
+ %i2 = ptrtoint ptr %p2 to i64
%d = sub i64 %i2, %i1
ret i64 %d
}
-define i64 @test_inbounds_nuw([0 x i32]* %base, i64 %idx) {
+define i64 @test_inbounds_nuw(ptr %base, i64 %idx) {
; CHECK-LABEL: @test_inbounds_nuw(
; CHECK-NEXT: [[P2_IDX:%.*]] = shl nuw nsw i64 [[IDX:%.*]], 2
; CHECK-NEXT: ret i64 [[P2_IDX]]
;
- %p1 = getelementptr inbounds [0 x i32], [0 x i32]* %base, i64 0, i64 0
- %p2 = getelementptr inbounds [0 x i32], [0 x i32]* %base, i64 0, i64 %idx
- %i1 = ptrtoint i32* %p1 to i64
- %i2 = ptrtoint i32* %p2 to i64
+ %p2 = getelementptr inbounds [0 x i32], ptr %base, i64 0, i64 %idx
+ %i1 = ptrtoint ptr %base to i64
+ %i2 = ptrtoint ptr %p2 to i64
%d = sub nuw i64 %i2, %i1
ret i64 %d
}
-define i64 @test_nuw([0 x i32]* %base, i64 %idx) {
+define i64 @test_nuw(ptr %base, i64 %idx) {
; CHECK-LABEL: @test_nuw(
; CHECK-NEXT: [[P2_IDX:%.*]] = shl i64 [[IDX:%.*]], 2
; CHECK-NEXT: ret i64 [[P2_IDX]]
;
- %p1 = getelementptr [0 x i32], [0 x i32]* %base, i64 0, i64 0
- %p2 = getelementptr [0 x i32], [0 x i32]* %base, i64 0, i64 %idx
- %i1 = ptrtoint i32* %p1 to i64
- %i2 = ptrtoint i32* %p2 to i64
+ %p2 = getelementptr [0 x i32], ptr %base, i64 0, i64 %idx
+ %i1 = ptrtoint ptr %base to i64
+ %i2 = ptrtoint ptr %p2 to i64
%d = sub nuw i64 %i2, %i1
ret i64 %d
}
-define i32 @test_inbounds_nuw_trunc([0 x i32]* %base, i64 %idx) {
+define i32 @test_inbounds_nuw_trunc(ptr %base, i64 %idx) {
; CHECK-LABEL: @test_inbounds_nuw_trunc(
; CHECK-NEXT: [[IDX_TR:%.*]] = trunc i64 [[IDX:%.*]] to i32
; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[IDX_TR]], 2
; CHECK-NEXT: ret i32 [[TMP1]]
;
- %p1 = getelementptr inbounds [0 x i32], [0 x i32]* %base, i64 0, i64 0
- %p2 = getelementptr inbounds [0 x i32], [0 x i32]* %base, i64 0, i64 %idx
- %i1 = ptrtoint i32* %p1 to i64
- %i2 = ptrtoint i32* %p2 to i64
+ %p2 = getelementptr inbounds [0 x i32], ptr %base, i64 0, i64 %idx
+ %i1 = ptrtoint ptr %base to i64
+ %i2 = ptrtoint ptr %p2 to i64
%t1 = trunc i64 %i1 to i32
%t2 = trunc i64 %i2 to i32
%d = sub nuw i32 %t2, %t1
ret i32 %d
}
-define i64 @test_inbounds_nuw_swapped([0 x i32]* %base, i64 %idx) {
+define i64 @test_inbounds_nuw_swapped(ptr %base, i64 %idx) {
; CHECK-LABEL: @test_inbounds_nuw_swapped(
; CHECK-NEXT: [[P2_IDX_NEG:%.*]] = mul i64 [[IDX:%.*]], -4
; CHECK-NEXT: ret i64 [[P2_IDX_NEG]]
;
- %p1 = getelementptr inbounds [0 x i32], [0 x i32]* %base, i64 0, i64 0
- %p2 = getelementptr inbounds [0 x i32], [0 x i32]* %base, i64 0, i64 %idx
- %i1 = ptrtoint i32* %p2 to i64
- %i2 = ptrtoint i32* %p1 to i64
+ %p2 = getelementptr inbounds [0 x i32], ptr %base, i64 0, i64 %idx
+ %i1 = ptrtoint ptr %p2 to i64
+ %i2 = ptrtoint ptr %base to i64
%d = sub nuw i64 %i2, %i1
ret i64 %d
}
-define i64 @test_inbounds1_nuw_swapped([0 x i32]* %base, i64 %idx) {
+define i64 @test_inbounds1_nuw_swapped(ptr %base, i64 %idx) {
; CHECK-LABEL: @test_inbounds1_nuw_swapped(
; CHECK-NEXT: [[P2_IDX_NEG:%.*]] = mul i64 [[IDX:%.*]], -4
; CHECK-NEXT: ret i64 [[P2_IDX_NEG]]
;
- %p1 = getelementptr inbounds [0 x i32], [0 x i32]* %base, i64 0, i64 0
- %p2 = getelementptr [0 x i32], [0 x i32]* %base, i64 0, i64 %idx
- %i1 = ptrtoint i32* %p2 to i64
- %i2 = ptrtoint i32* %p1 to i64
+ %p2 = getelementptr [0 x i32], ptr %base, i64 0, i64 %idx
+ %i1 = ptrtoint ptr %p2 to i64
+ %i2 = ptrtoint ptr %base to i64
%d = sub nuw i64 %i2, %i1
ret i64 %d
}
-define i64 @test_inbounds2_nuw_swapped([0 x i32]* %base, i64 %idx) {
+define i64 @test_inbounds2_nuw_swapped(ptr %base, i64 %idx) {
; CHECK-LABEL: @test_inbounds2_nuw_swapped(
; CHECK-NEXT: [[P2_IDX_NEG:%.*]] = mul i64 [[IDX:%.*]], -4
; CHECK-NEXT: ret i64 [[P2_IDX_NEG]]
;
- %p1 = getelementptr [0 x i32], [0 x i32]* %base, i64 0, i64 0
- %p2 = getelementptr inbounds [0 x i32], [0 x i32]* %base, i64 0, i64 %idx
- %i1 = ptrtoint i32* %p2 to i64
- %i2 = ptrtoint i32* %p1 to i64
+ %p2 = getelementptr inbounds [0 x i32], ptr %base, i64 0, i64 %idx
+ %i1 = ptrtoint ptr %p2 to i64
+ %i2 = ptrtoint ptr %base to i64
%d = sub nuw i64 %i2, %i1
ret i64 %d
}
-define i64 @test_inbounds_two_gep([0 x i32]* %base, i64 %idx, i64 %idx2) {
+define i64 @test_inbounds_two_gep(ptr %base, i64 %idx, i64 %idx2) {
; CHECK-LABEL: @test_inbounds_two_gep(
; CHECK-NEXT: [[TMP1:%.*]] = sub nsw i64 [[IDX2:%.*]], [[IDX:%.*]]
; CHECK-NEXT: [[GEPDIFF:%.*]] = shl nsw i64 [[TMP1]], 2
; CHECK-NEXT: ret i64 [[GEPDIFF]]
;
- %p1 = getelementptr inbounds [0 x i32], [0 x i32]* %base, i64 0, i64 %idx
- %p2 = getelementptr inbounds [0 x i32], [0 x i32]* %base, i64 0, i64 %idx2
- %i1 = ptrtoint i32* %p1 to i64
- %i2 = ptrtoint i32* %p2 to i64
+ %p1 = getelementptr inbounds [0 x i32], ptr %base, i64 0, i64 %idx
+ %p2 = getelementptr inbounds [0 x i32], ptr %base, i64 0, i64 %idx2
+ %i1 = ptrtoint ptr %p1 to i64
+ %i2 = ptrtoint ptr %p2 to i64
%d = sub i64 %i2, %i1
ret i64 %d
}
-define i64 @test_inbounds_nsw_two_gep([0 x i32]* %base, i64 %idx, i64 %idx2) {
+define i64 @test_inbounds_nsw_two_gep(ptr %base, i64 %idx, i64 %idx2) {
; CHECK-LABEL: @test_inbounds_nsw_two_gep(
; CHECK-NEXT: [[TMP1:%.*]] = sub nsw i64 [[IDX2:%.*]], [[IDX:%.*]]
; CHECK-NEXT: [[GEPDIFF:%.*]] = shl nsw i64 [[TMP1]], 2
; CHECK-NEXT: ret i64 [[GEPDIFF]]
;
- %p1 = getelementptr inbounds [0 x i32], [0 x i32]* %base, i64 0, i64 %idx
- %p2 = getelementptr inbounds [0 x i32], [0 x i32]* %base, i64 0, i64 %idx2
- %i1 = ptrtoint i32* %p1 to i64
- %i2 = ptrtoint i32* %p2 to i64
+ %p1 = getelementptr inbounds [0 x i32], ptr %base, i64 0, i64 %idx
+ %p2 = getelementptr inbounds [0 x i32], ptr %base, i64 0, i64 %idx2
+ %i1 = ptrtoint ptr %p1 to i64
+ %i2 = ptrtoint ptr %p2 to i64
%d = sub nsw i64 %i2, %i1
ret i64 %d
}
-define i64 @test_inbounds_nuw_two_gep([0 x i32]* %base, i64 %idx, i64 %idx2) {
+define i64 @test_inbounds_nuw_two_gep(ptr %base, i64 %idx, i64 %idx2) {
; CHECK-LABEL: @test_inbounds_nuw_two_gep(
; CHECK-NEXT: [[TMP1:%.*]] = sub nsw i64 [[IDX2:%.*]], [[IDX:%.*]]
; CHECK-NEXT: [[GEPDIFF:%.*]] = shl nsw i64 [[TMP1]], 2
; CHECK-NEXT: ret i64 [[GEPDIFF]]
;
- %p1 = getelementptr inbounds [0 x i32], [0 x i32]* %base, i64 0, i64 %idx
- %p2 = getelementptr inbounds [0 x i32], [0 x i32]* %base, i64 0, i64 %idx2
- %i1 = ptrtoint i32* %p1 to i64
- %i2 = ptrtoint i32* %p2 to i64
+ %p1 = getelementptr inbounds [0 x i32], ptr %base, i64 0, i64 %idx
+ %p2 = getelementptr inbounds [0 x i32], ptr %base, i64 0, i64 %idx2
+ %i1 = ptrtoint ptr %p1 to i64
+ %i2 = ptrtoint ptr %p2 to i64
%d = sub nuw i64 %i2, %i1
ret i64 %d
}
-define i64 @test_inbounds_nuw_multi_index([0 x [2 x i32]]* %base, i64 %idx, i64 %idx2) {
+define i64 @test_inbounds_nuw_multi_index(ptr %base, i64 %idx, i64 %idx2) {
; CHECK-LABEL: @test_inbounds_nuw_multi_index(
; CHECK-NEXT: [[P2_IDX:%.*]] = shl nsw i64 [[IDX:%.*]], 3
; CHECK-NEXT: [[P2_IDX1:%.*]] = shl nsw i64 [[IDX2:%.*]], 2
; CHECK-NEXT: [[P2_OFFS:%.*]] = add nsw i64 [[P2_IDX]], [[P2_IDX1]]
; CHECK-NEXT: ret i64 [[P2_OFFS]]
;
- %p1 = getelementptr inbounds [0 x [2 x i32]], [0 x [2 x i32]]* %base, i64 0, i64 0, i64 0
- %p2 = getelementptr inbounds [0 x [2 x i32]], [0 x [2 x i32]]* %base, i64 0, i64 %idx, i64 %idx2
- %i1 = ptrtoint i32* %p1 to i64
- %i2 = ptrtoint i32* %p2 to i64
+ %p2 = getelementptr inbounds [0 x [2 x i32]], ptr %base, i64 0, i64 %idx, i64 %idx2
+ %i1 = ptrtoint ptr %base to i64
+ %i2 = ptrtoint ptr %p2 to i64
%d = sub nuw i64 %i2, %i1
ret i64 %d
}
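; Worked offset arithmetic for the multi-index test above (a sketch using
; the test's own types): each [2 x i32] row is 8 bytes and each i32 is 4,
; so element [%idx][%idx2] sits at byte offset %idx*8 + %idx2*4, which is
; exactly the shl-by-3, shl-by-2, and add sequence the checks expect.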
; rdar://7362831
-define i32 @test23(i8* %P, i64 %A){
+define i32 @test23(ptr %P, i64 %A){
; CHECK-LABEL: @test23(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[A:%.*]] to i32
; CHECK-NEXT: ret i32 [[TMP1]]
;
- %B = getelementptr inbounds i8, i8* %P, i64 %A
- %C = ptrtoint i8* %B to i64
+ %B = getelementptr inbounds i8, ptr %P, i64 %A
+ %C = ptrtoint ptr %B to i64
%D = trunc i64 %C to i32
- %E = ptrtoint i8* %P to i64
+ %E = ptrtoint ptr %P to i64
%F = trunc i64 %E to i32
%G = sub i32 %D, %F
ret i32 %G
}
-define i8 @test23_as1(i8 addrspace(1)* %P, i16 %A) {
+define i8 @test23_as1(ptr addrspace(1) %P, i16 %A) {
; CHECK-LABEL: @test23_as1(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i16 [[A:%.*]] to i8
; CHECK-NEXT: ret i8 [[TMP1]]
;
- %B = getelementptr inbounds i8, i8 addrspace(1)* %P, i16 %A
- %C = ptrtoint i8 addrspace(1)* %B to i16
+ %B = getelementptr inbounds i8, ptr addrspace(1) %P, i16 %A
+ %C = ptrtoint ptr addrspace(1) %B to i16
%D = trunc i16 %C to i8
- %E = ptrtoint i8 addrspace(1)* %P to i16
+ %E = ptrtoint ptr addrspace(1) %P to i16
%F = trunc i16 %E to i8
%G = sub i8 %D, %F
ret i8 %G
}
-define i64 @test24(i8* %P, i64 %A){
+define i64 @test24(ptr %P, i64 %A){
; CHECK-LABEL: @test24(
; CHECK-NEXT: ret i64 [[A:%.*]]
;
- %B = getelementptr inbounds i8, i8* %P, i64 %A
- %C = ptrtoint i8* %B to i64
- %E = ptrtoint i8* %P to i64
+ %B = getelementptr inbounds i8, ptr %P, i64 %A
+ %C = ptrtoint ptr %B to i64
+ %E = ptrtoint ptr %P to i64
%G = sub i64 %C, %E
ret i64 %G
}
-define i16 @test24_as1(i8 addrspace(1)* %P, i16 %A) {
+define i16 @test24_as1(ptr addrspace(1) %P, i16 %A) {
; CHECK-LABEL: @test24_as1(
; CHECK-NEXT: ret i16 [[A:%.*]]
;
- %B = getelementptr inbounds i8, i8 addrspace(1)* %P, i16 %A
- %C = ptrtoint i8 addrspace(1)* %B to i16
- %E = ptrtoint i8 addrspace(1)* %P to i16
+ %B = getelementptr inbounds i8, ptr addrspace(1) %P, i16 %A
+ %C = ptrtoint ptr addrspace(1) %B to i16
+ %E = ptrtoint ptr addrspace(1) %P to i16
%G = sub i16 %C, %E
ret i16 %G
}
-define i64 @test24a(i8* %P, i64 %A){
+define i64 @test24a(ptr %P, i64 %A){
; CHECK-LABEL: @test24a(
; CHECK-NEXT: [[DIFF_NEG:%.*]] = sub i64 0, [[A:%.*]]
; CHECK-NEXT: ret i64 [[DIFF_NEG]]
;
- %B = getelementptr inbounds i8, i8* %P, i64 %A
- %C = ptrtoint i8* %B to i64
- %E = ptrtoint i8* %P to i64
+ %B = getelementptr inbounds i8, ptr %P, i64 %A
+ %C = ptrtoint ptr %B to i64
+ %E = ptrtoint ptr %P to i64
%G = sub i64 %E, %C
ret i64 %G
}
-define i16 @test24a_as1(i8 addrspace(1)* %P, i16 %A) {
+define i16 @test24a_as1(ptr addrspace(1) %P, i16 %A) {
; CHECK-LABEL: @test24a_as1(
; CHECK-NEXT: [[DIFF_NEG:%.*]] = sub i16 0, [[A:%.*]]
; CHECK-NEXT: ret i16 [[DIFF_NEG]]
;
- %B = getelementptr inbounds i8, i8 addrspace(1)* %P, i16 %A
- %C = ptrtoint i8 addrspace(1)* %B to i16
- %E = ptrtoint i8 addrspace(1)* %P to i16
+ %B = getelementptr inbounds i8, ptr addrspace(1) %P, i16 %A
+ %C = ptrtoint ptr addrspace(1) %B to i16
+ %E = ptrtoint ptr addrspace(1) %P to i16
%G = sub i16 %E, %C
ret i16 %G
}
@Arr = external global [42 x i16]
-define i64 @test24b(i8* %P, i64 %A){
+define i64 @test24b(ptr %P, i64 %A){
; CHECK-LABEL: @test24b(
; CHECK-NEXT: [[B_IDX:%.*]] = shl nsw i64 [[A:%.*]], 1
; CHECK-NEXT: ret i64 [[B_IDX]]
;
- %B = getelementptr inbounds [42 x i16], [42 x i16]* @Arr, i64 0, i64 %A
- %C = ptrtoint i16* %B to i64
- %G = sub i64 %C, ptrtoint ([42 x i16]* @Arr to i64)
+ %B = getelementptr inbounds [42 x i16], ptr @Arr, i64 0, i64 %A
+ %C = ptrtoint ptr %B to i64
+ %G = sub i64 %C, ptrtoint (ptr @Arr to i64)
ret i64 %G
}
-define i64 @test25(i8* %P, i64 %A){
+define i64 @test25(ptr %P, i64 %A){
; CHECK-LABEL: @test25(
; CHECK-NEXT: [[B_IDX:%.*]] = shl nsw i64 [[A:%.*]], 1
; CHECK-NEXT: [[GEPDIFF:%.*]] = add nsw i64 [[B_IDX]], -84
; CHECK-NEXT: ret i64 [[GEPDIFF]]
;
- %B = getelementptr inbounds [42 x i16], [42 x i16]* @Arr, i64 0, i64 %A
- %C = ptrtoint i16* %B to i64
- %G = sub i64 %C, ptrtoint (i16* getelementptr ([42 x i16], [42 x i16]* @Arr, i64 1, i64 0) to i64)
+ %B = getelementptr inbounds [42 x i16], ptr @Arr, i64 0, i64 %A
+ %C = ptrtoint ptr %B to i64
+ %G = sub i64 %C, ptrtoint (ptr getelementptr ([42 x i16], ptr @Arr, i64 1, i64 0) to i64)
ret i64 %G
}
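; Worked constants for @test25 (illustrative): @Arr is [42 x i16], so
; getelementptr ([42 x i16], ptr @Arr, i64 1, i64 0) points 42*2 = 84 bytes
; past the start of @Arr. The difference is therefore %A*2 - 84, matching
; the shl-by-1 followed by "add nsw ... -84" in the checks.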
@Arr_as1 = external addrspace(1) global [42 x i16]
-define i16 @test25_as1(i8 addrspace(1)* %P, i64 %A) {
+define i16 @test25_as1(ptr addrspace(1) %P, i64 %A) {
; CHECK-LABEL: @test25_as1(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[A:%.*]] to i16
; CHECK-NEXT: [[B_IDX:%.*]] = shl nsw i16 [[TMP1]], 1
; CHECK-NEXT: [[GEPDIFF:%.*]] = add nsw i16 [[B_IDX]], -84
; CHECK-NEXT: ret i16 [[GEPDIFF]]
;
- %B = getelementptr inbounds [42 x i16], [42 x i16] addrspace(1)* @Arr_as1, i64 0, i64 %A
- %C = ptrtoint i16 addrspace(1)* %B to i16
- %G = sub i16 %C, ptrtoint (i16 addrspace(1)* getelementptr ([42 x i16], [42 x i16] addrspace(1)* @Arr_as1, i64 1, i64 0) to i16)
+ %B = getelementptr inbounds [42 x i16], ptr addrspace(1) @Arr_as1, i64 0, i64 %A
+ %C = ptrtoint ptr addrspace(1) %B to i16
+ %G = sub i16 %C, ptrtoint (ptr addrspace(1) getelementptr ([42 x i16], ptr addrspace(1) @Arr_as1, i64 1, i64 0) to i16)
ret i16 %G
}
-define i64 @test30(i8* %foo, i64 %i, i64 %j) {
+define i64 @test30(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test30(
; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i64 [[I:%.*]], 2
; CHECK-NEXT: [[GEPDIFF:%.*]] = sub nsw i64 [[GEP1_IDX]], [[J:%.*]]
; CHECK-NEXT: ret i64 [[GEPDIFF]]
;
- %bit = bitcast i8* %foo to i32*
- %gep1 = getelementptr inbounds i32, i32* %bit, i64 %i
- %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
- %cast1 = ptrtoint i32* %gep1 to i64
- %cast2 = ptrtoint i8* %gep2 to i64
+ %gep1 = getelementptr inbounds i32, ptr %foo, i64 %i
+ %gep2 = getelementptr inbounds i8, ptr %foo, i64 %j
+ %cast1 = ptrtoint ptr %gep1 to i64
+ %cast2 = ptrtoint ptr %gep2 to i64
%sub = sub i64 %cast1, %cast2
ret i64 %sub
}
-define i16 @test30_as1(i8 addrspace(1)* %foo, i16 %i, i16 %j) {
+define i16 @test30_as1(ptr addrspace(1) %foo, i16 %i, i16 %j) {
; CHECK-LABEL: @test30_as1(
; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i16 [[I:%.*]], 2
; CHECK-NEXT: [[GEPDIFF:%.*]] = sub nsw i16 [[GEP1_IDX]], [[J:%.*]]
; CHECK-NEXT: ret i16 [[GEPDIFF]]
;
- %bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
- %gep1 = getelementptr inbounds i32, i32 addrspace(1)* %bit, i16 %i
- %gep2 = getelementptr inbounds i8, i8 addrspace(1)* %foo, i16 %j
- %cast1 = ptrtoint i32 addrspace(1)* %gep1 to i16
- %cast2 = ptrtoint i8 addrspace(1)* %gep2 to i16
+ %gep1 = getelementptr inbounds i32, ptr addrspace(1) %foo, i16 %i
+ %gep2 = getelementptr inbounds i8, ptr addrspace(1) %foo, i16 %j
+ %cast1 = ptrtoint ptr addrspace(1) %gep1 to i16
+ %cast2 = ptrtoint ptr addrspace(1) %gep2 to i16
%sub = sub i16 %cast1, %cast2
ret i16 %sub
}
; Inbounds translates to 'nsw' on sub
-define i64 @gep_diff_both_inbounds(i8* %foo, i64 %i, i64 %j) {
+define i64 @gep_diff_both_inbounds(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @gep_diff_both_inbounds(
; CHECK-NEXT: [[GEPDIFF:%.*]] = sub nsw i64 [[I:%.*]], [[J:%.*]]
; CHECK-NEXT: ret i64 [[GEPDIFF]]
;
- %gep1 = getelementptr inbounds i8, i8* %foo, i64 %i
- %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
- %cast1 = ptrtoint i8* %gep1 to i64
- %cast2 = ptrtoint i8* %gep2 to i64
+ %gep1 = getelementptr inbounds i8, ptr %foo, i64 %i
+ %gep2 = getelementptr inbounds i8, ptr %foo, i64 %j
+ %cast1 = ptrtoint ptr %gep1 to i64
+ %cast2 = ptrtoint ptr %gep2 to i64
%sub = sub i64 %cast1, %cast2
ret i64 %sub
}
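; Reasoning sketch for the 'nsw' above: both inbounds geps stay within a
; single allocated object, so the two byte offsets lie in the signed index
; range of that object and their difference cannot wrap. The negative
; tests below drop inbounds from one gep, which loses that guarantee, so
; only a plain "sub" may be produced.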
; Negative test for 'nsw' - both geps must be inbounds
-define i64 @gep_diff_first_inbounds(i8* %foo, i64 %i, i64 %j) {
+define i64 @gep_diff_first_inbounds(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @gep_diff_first_inbounds(
; CHECK-NEXT: [[GEPDIFF:%.*]] = sub i64 [[I:%.*]], [[J:%.*]]
; CHECK-NEXT: ret i64 [[GEPDIFF]]
;
- %gep1 = getelementptr inbounds i8, i8* %foo, i64 %i
- %gep2 = getelementptr i8, i8* %foo, i64 %j
- %cast1 = ptrtoint i8* %gep1 to i64
- %cast2 = ptrtoint i8* %gep2 to i64
+ %gep1 = getelementptr inbounds i8, ptr %foo, i64 %i
+ %gep2 = getelementptr i8, ptr %foo, i64 %j
+ %cast1 = ptrtoint ptr %gep1 to i64
+ %cast2 = ptrtoint ptr %gep2 to i64
%sub = sub i64 %cast1, %cast2
ret i64 %sub
}
; Negative test for 'nsw' - both geps must be inbounds
-define i64 @gep_diff_second_inbounds(i8* %foo, i64 %i, i64 %j) {
+define i64 @gep_diff_second_inbounds(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @gep_diff_second_inbounds(
; CHECK-NEXT: [[GEPDIFF:%.*]] = sub i64 [[I:%.*]], [[J:%.*]]
; CHECK-NEXT: ret i64 [[GEPDIFF]]
;
- %gep1 = getelementptr i8, i8* %foo, i64 %i
- %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
- %cast1 = ptrtoint i8* %gep1 to i64
- %cast2 = ptrtoint i8* %gep2 to i64
+ %gep1 = getelementptr i8, ptr %foo, i64 %i
+ %gep2 = getelementptr inbounds i8, ptr %foo, i64 %j
+ %cast1 = ptrtoint ptr %gep1 to i64
+ %cast2 = ptrtoint ptr %gep2 to i64
%sub = sub i64 %cast1, %cast2
ret i64 %sub
}
-define i64 @gep_diff_with_bitcast(i64* %p, i64 %idx) {
+define i64 @gep_diff_with_bitcast(ptr %p, i64 %idx) {
; CHECK-LABEL: @gep_diff_with_bitcast(
; CHECK-NEXT: ret i64 [[IDX:%.*]]
;
- %i0 = bitcast i64* %p to [4 x i64]*
- %i1 = getelementptr inbounds [4 x i64], [4 x i64]* %i0, i64 %idx
- %i3 = ptrtoint [4 x i64]* %i1 to i64
- %i4 = ptrtoint i64* %p to i64
+ %i1 = getelementptr inbounds [4 x i64], ptr %p, i64 %idx
+ %i3 = ptrtoint ptr %i1 to i64
+ %i4 = ptrtoint ptr %p to i64
%i5 = sub nuw i64 %i3, %i4
%i6 = lshr i64 %i5, 5
ret i64 %i6
}
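; Offset arithmetic for @gep_diff_with_bitcast (illustrative): the gep
; strides by whole [4 x i64] arrays, i.e. 32 bytes, so %i3 - %i4 equals
; %idx * 32, and the lshr by 5 divides it back down to %idx, matching the
; "ret i64 %idx" in the checks.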
; Extra uses
-define i32 @neg_extra_use_or_lshr_i32(i32 %x, i32* %p) {
+define i32 @neg_extra_use_or_lshr_i32(i32 %x, ptr %p) {
; CHECK-LABEL: @neg_extra_use_or_lshr_i32(
; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[X:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i32 [[X]], 0
; CHECK-NEXT: [[SHR:%.*]] = zext i1 [[TMP1]] to i32
-; CHECK-NEXT: store i32 [[NEG]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[NEG]], ptr [[P:%.*]], align 4
; CHECK-NEXT: ret i32 [[SHR]]
;
%neg = sub i32 0, %x
%or = or i32 %neg, %x
%shr = lshr i32 %or, 31
- store i32 %neg, i32* %p
+ store i32 %neg, ptr %p
ret i32 %shr
}
; Negative Tests
-define i32 @neg_or_extra_use_lshr_i32(i32 %x, i32* %p) {
+define i32 @neg_or_extra_use_lshr_i32(i32 %x, ptr %p) {
; CHECK-LABEL: @neg_or_extra_use_lshr_i32(
; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[X:%.*]]
; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X]]
; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[OR]], 31
-; CHECK-NEXT: store i32 [[OR]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[OR]], ptr [[P:%.*]], align 4
; CHECK-NEXT: ret i32 [[SHR]]
;
%neg = sub i32 0, %x
%or = or i32 %neg, %x
%shr = lshr i32 %or, 31
- store i32 %or, i32* %p
+ store i32 %or, ptr %p
ret i32 %shr
}
ret <2 x i8> %r
}
-define i5 @sub_umin_uses(i5 %a, i5 %b, i5* %p) {
+define i5 @sub_umin_uses(i5 %a, i5 %b, ptr %p) {
; CHECK-LABEL: define {{[^@]+}}@sub_umin_uses
-; CHECK-SAME: (i5 [[A:%.*]], i5 [[B:%.*]], i5* [[P:%.*]]) {
+; CHECK-SAME: (i5 [[A:%.*]], i5 [[B:%.*]], ptr [[P:%.*]]) {
; CHECK-NEXT: [[UMIN:%.*]] = call i5 @llvm.umin.i5(i5 [[A]], i5 [[B]])
-; CHECK-NEXT: store i5 [[UMIN]], i5* [[P]], align 1
+; CHECK-NEXT: store i5 [[UMIN]], ptr [[P]], align 1
; CHECK-NEXT: [[R:%.*]] = sub i5 [[A]], [[UMIN]]
; CHECK-NEXT: ret i5 [[R]]
;
%umin = call i5 @llvm.umin.i5(i5 %a, i5 %b)
- store i5 %umin, i5* %p
+ store i5 %umin, ptr %p
%r = sub i5 %a, %umin
ret i5 %r
}
define i32 @sub_constant_expression(i32 %x) {
; CHECK-LABEL: @sub_constant_expression(
-; CHECK-NEXT: [[R:%.*]] = sub i32 [[X:%.*]], ptrtoint (i32* @g to i32)
+; CHECK-NEXT: [[R:%.*]] = sub i32 [[X:%.*]], ptrtoint (ptr @g to i32)
; CHECK-NEXT: ret i32 [[R]]
;
- %r = sub i32 %x, ptrtoint (i32* @g to i32)
+ %r = sub i32 %x, ptrtoint (ptr @g to i32)
ret i32 %r
}
define <4 x i32> @sub_constant_expression_vec(<4 x i32> %x) {
; CHECK-LABEL: @sub_constant_expression_vec(
-; CHECK-NEXT: [[R:%.*]] = sub <4 x i32> [[X:%.*]], bitcast (i128 ptrtoint (i32* @g to i128) to <4 x i32>)
+; CHECK-NEXT: [[R:%.*]] = sub <4 x i32> [[X:%.*]], bitcast (i128 ptrtoint (ptr @g to i128) to <4 x i32>)
; CHECK-NEXT: ret <4 x i32> [[R]]
;
- %r = sub <4 x i32> %x, bitcast (i128 ptrtoint (i32* @g to i128) to <4 x i32>)
+ %r = sub <4 x i32> %x, bitcast (i128 ptrtoint (ptr @g to i128) to <4 x i32>)
ret <4 x i32> %r
}
ret i64 %neg
}
-define i64 @test_neg_shl_sub_extra_use1(i64 %a, i64 %b, i64* %p) {
+define i64 @test_neg_shl_sub_extra_use1(i64 %a, i64 %b, ptr %p) {
; CHECK-LABEL: @test_neg_shl_sub_extra_use1(
; CHECK-NEXT: [[SUB:%.*]] = sub i64 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: store i64 [[SUB]], i64* [[P:%.*]], align 8
+; CHECK-NEXT: store i64 [[SUB]], ptr [[P:%.*]], align 8
; CHECK-NEXT: [[MUL_NEG:%.*]] = mul i64 [[SUB]], -4
; CHECK-NEXT: ret i64 [[MUL_NEG]]
;
%sub = sub i64 %a, %b
- store i64 %sub, i64* %p
+ store i64 %sub, ptr %p
%mul = shl i64 %sub, 2
%neg = sub i64 0, %mul
ret i64 %neg
}
-define i64 @test_neg_shl_sub_extra_use2(i64 %a, i64 %b, i64* %p) {
+define i64 @test_neg_shl_sub_extra_use2(i64 %a, i64 %b, ptr %p) {
; CHECK-LABEL: @test_neg_shl_sub_extra_use2(
; CHECK-NEXT: [[SUB:%.*]] = sub i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[MUL:%.*]] = shl i64 [[SUB]], 2
-; CHECK-NEXT: store i64 [[MUL]], i64* [[P:%.*]], align 8
+; CHECK-NEXT: store i64 [[MUL]], ptr [[P:%.*]], align 8
; CHECK-NEXT: [[NEG:%.*]] = sub i64 0, [[MUL]]
; CHECK-NEXT: ret i64 [[NEG]]
;
%sub = sub i64 %a, %b
%mul = shl i64 %sub, 2
- store i64 %mul, i64* %p
+ store i64 %mul, ptr %p
%neg = sub i64 0, %mul
ret i64 %neg
}
ret i64 %neg
}
-define i64 @test_neg_zext_i1_extra_use(i1 %a, i64 %b, i64* %p) {
+define i64 @test_neg_zext_i1_extra_use(i1 %a, i64 %b, ptr %p) {
; CHECK-LABEL: @test_neg_zext_i1_extra_use(
; CHECK-NEXT: [[EXT_NEG:%.*]] = sext i1 [[A:%.*]] to i64
; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[A]] to i64
-; CHECK-NEXT: store i64 [[EXT]], i64* [[P:%.*]], align 8
+; CHECK-NEXT: store i64 [[EXT]], ptr [[P:%.*]], align 8
; CHECK-NEXT: ret i64 [[EXT_NEG]]
;
%ext = zext i1 %a to i64
%neg = sub i64 0, %ext
- store i64 %ext, i64* %p
+ store i64 %ext, ptr %p
ret i64 %neg
}
-define i64 @test_neg_sext_i1_extra_use(i1 %a, i64 %b, i64* %p) {
+define i64 @test_neg_sext_i1_extra_use(i1 %a, i64 %b, ptr %p) {
; CHECK-LABEL: @test_neg_sext_i1_extra_use(
; CHECK-NEXT: [[EXT_NEG:%.*]] = zext i1 [[A:%.*]] to i64
; CHECK-NEXT: [[EXT:%.*]] = sext i1 [[A]] to i64
-; CHECK-NEXT: store i64 [[EXT]], i64* [[P:%.*]], align 8
+; CHECK-NEXT: store i64 [[EXT]], ptr [[P:%.*]], align 8
; CHECK-NEXT: ret i64 [[EXT_NEG]]
;
%ext = sext i1 %a to i64
%neg = sub i64 0, %ext
- store i64 %ext, i64* %p
+ store i64 %ext, ptr %p
ret i64 %neg
}
ret i32 %Y
}
-@dummy_global1 = external global i8*
-@dummy_global2 = external global i8*
+@dummy_global1 = external global ptr
+@dummy_global2 = external global ptr
-define i64 @test58([100 x [100 x i8]]* %foo, i64 %i, i64 %j) {
+define i64 @test58(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test58(
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[I:%.*]], [[J:%.*]]
; CHECK-NEXT: ret i64 [[TMP1]]
;
- %gep1 = getelementptr inbounds [100 x [100 x i8]], [100 x [100 x i8]]* %foo, i64 0, i64 42, i64 %i
- %gep2 = getelementptr inbounds [100 x [100 x i8]], [100 x [100 x i8]]* %foo, i64 0, i64 42, i64 %j
- %cast1 = ptrtoint i8* %gep1 to i64
- %cast2 = ptrtoint i8* %gep2 to i64
+ %gep1 = getelementptr inbounds [100 x [100 x i8]], ptr %foo, i64 0, i64 42, i64 %i
+ %gep2 = getelementptr inbounds [100 x [100 x i8]], ptr %foo, i64 0, i64 42, i64 %j
+ %cast1 = ptrtoint ptr %gep1 to i64
+ %cast2 = ptrtoint ptr %gep2 to i64
%sub = sub i64 %cast1, %cast2
ret i64 %sub
}
-define i64 @test59([100 x [100 x i8]]* %foo, i64 %i) {
+define i64 @test59(ptr %foo, i64 %i) {
; CHECK-LABEL: @test59(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [100 x [100 x i8]], [100 x [100 x i8]]* [[FOO:%.*]], i64 0, i64 42, i64 [[I:%.*]]
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [100 x [100 x i8]], [100 x [100 x i8]]* [[FOO]], i64 0, i64 42, i64 0
-; CHECK-NEXT: store i8* [[GEP1]], i8** @dummy_global1, align 8
-; CHECK-NEXT: store i8* [[GEP2]], i8** @dummy_global2, align 8
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [100 x [100 x i8]], ptr [[FOO:%.*]], i64 0, i64 42, i64 [[I:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [100 x [100 x i8]], ptr [[FOO]], i64 0, i64 42, i64 0
+; CHECK-NEXT: store ptr [[GEP1]], ptr @dummy_global1, align 8
+; CHECK-NEXT: store ptr [[GEP2]], ptr @dummy_global2, align 8
; CHECK-NEXT: ret i64 [[I]]
;
; gep1 and gep2 have more than one use
- %gep1 = getelementptr inbounds [100 x [100 x i8]], [100 x [100 x i8]]* %foo, i64 0, i64 42, i64 %i
- %gep2 = getelementptr inbounds [100 x [100 x i8]], [100 x [100 x i8]]* %foo, i64 0, i64 42, i64 0
- %cast1 = ptrtoint i8* %gep1 to i64
- %cast2 = ptrtoint i8* %gep2 to i64
+ %gep1 = getelementptr inbounds [100 x [100 x i8]], ptr %foo, i64 0, i64 42, i64 %i
+ %gep2 = getelementptr inbounds [100 x [100 x i8]], ptr %foo, i64 0, i64 42, i64 0
+ %cast1 = ptrtoint ptr %gep1 to i64
+ %cast2 = ptrtoint ptr %gep2 to i64
%sub = sub i64 %cast1, %cast2
- store i8* %gep1, i8** @dummy_global1
- store i8* %gep2, i8** @dummy_global2
+ store ptr %gep1, ptr @dummy_global1
+ store ptr %gep2, ptr @dummy_global2
ret i64 %sub
}
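; Note on @test59 versus @test60/@test61 below (reasoning sketch): extra
; uses alone do not block the fold. Here gep2 is fully constant, so the
; difference simplifies to %i without duplicating any runtime index
; arithmetic; once the multi-use gep has a variable index, as in the next
; two tests, folding would duplicate that arithmetic and is skipped.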
-define i64 @test60([100 x [100 x i8]]* %foo, i64 %i, i64 %j) {
+define i64 @test60(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [100 x [100 x i8]], [100 x [100 x i8]]* [[FOO:%.*]], i64 0, i64 [[J:%.*]], i64 [[I:%.*]]
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [100 x [100 x i8]], [100 x [100 x i8]]* [[FOO]], i64 0, i64 42, i64 0
-; CHECK-NEXT: [[CAST1:%.*]] = ptrtoint i8* [[GEP1]] to i64
-; CHECK-NEXT: [[CAST2:%.*]] = ptrtoint i8* [[GEP2]] to i64
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [100 x [100 x i8]], ptr [[FOO:%.*]], i64 0, i64 [[J:%.*]], i64 [[I:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [100 x [100 x i8]], ptr [[FOO]], i64 0, i64 42, i64 0
+; CHECK-NEXT: [[CAST1:%.*]] = ptrtoint ptr [[GEP1]] to i64
+; CHECK-NEXT: [[CAST2:%.*]] = ptrtoint ptr [[GEP2]] to i64
; CHECK-NEXT: [[SUB:%.*]] = sub i64 [[CAST1]], [[CAST2]]
-; CHECK-NEXT: store i8* [[GEP1]], i8** @dummy_global1, align 8
+; CHECK-NEXT: store ptr [[GEP1]], ptr @dummy_global1, align 8
; CHECK-NEXT: ret i64 [[SUB]]
;
; gep1 has a non-constant index and more than one use. Shouldn't duplicate the arithmetic.
- %gep1 = getelementptr inbounds [100 x [100 x i8]], [100 x [100 x i8]]* %foo, i64 0, i64 %j, i64 %i
- %gep2 = getelementptr inbounds [100 x [100 x i8]], [100 x [100 x i8]]* %foo, i64 0, i64 42, i64 0
- %cast1 = ptrtoint i8* %gep1 to i64
- %cast2 = ptrtoint i8* %gep2 to i64
+ %gep1 = getelementptr inbounds [100 x [100 x i8]], ptr %foo, i64 0, i64 %j, i64 %i
+ %gep2 = getelementptr inbounds [100 x [100 x i8]], ptr %foo, i64 0, i64 42, i64 0
+ %cast1 = ptrtoint ptr %gep1 to i64
+ %cast2 = ptrtoint ptr %gep2 to i64
%sub = sub i64 %cast1, %cast2
- store i8* %gep1, i8** @dummy_global1
+ store ptr %gep1, ptr @dummy_global1
ret i64 %sub
}
-define i64 @test61([100 x [100 x i8]]* %foo, i64 %i, i64 %j) {
+define i64 @test61(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test61(
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [100 x [100 x i8]], [100 x [100 x i8]]* [[FOO:%.*]], i64 0, i64 42, i64 0
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [100 x [100 x i8]], [100 x [100 x i8]]* [[FOO]], i64 0, i64 [[J:%.*]], i64 [[I:%.*]]
-; CHECK-NEXT: [[CAST1:%.*]] = ptrtoint i8* [[GEP1]] to i64
-; CHECK-NEXT: [[CAST2:%.*]] = ptrtoint i8* [[GEP2]] to i64
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [100 x [100 x i8]], ptr [[FOO:%.*]], i64 0, i64 42, i64 0
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [100 x [100 x i8]], ptr [[FOO]], i64 0, i64 [[J:%.*]], i64 [[I:%.*]]
+; CHECK-NEXT: [[CAST1:%.*]] = ptrtoint ptr [[GEP1]] to i64
+; CHECK-NEXT: [[CAST2:%.*]] = ptrtoint ptr [[GEP2]] to i64
; CHECK-NEXT: [[SUB:%.*]] = sub i64 [[CAST1]], [[CAST2]]
-; CHECK-NEXT: store i8* [[GEP2]], i8** @dummy_global2, align 8
+; CHECK-NEXT: store ptr [[GEP2]], ptr @dummy_global2, align 8
; CHECK-NEXT: ret i64 [[SUB]]
;
; gep2 has a non-constant index and more than one use. Shouldn't duplicate the arithmetic.
- %gep1 = getelementptr inbounds [100 x [100 x i8]], [100 x [100 x i8]]* %foo, i64 0, i64 42, i64 0
- %gep2 = getelementptr inbounds [100 x [100 x i8]], [100 x [100 x i8]]* %foo, i64 0, i64 %j, i64 %i
- %cast1 = ptrtoint i8* %gep1 to i64
- %cast2 = ptrtoint i8* %gep2 to i64
+ %gep1 = getelementptr inbounds [100 x [100 x i8]], ptr %foo, i64 0, i64 42, i64 0
+ %gep2 = getelementptr inbounds [100 x [100 x i8]], ptr %foo, i64 0, i64 %j, i64 %i
+ %cast1 = ptrtoint ptr %gep1 to i64
+ %cast2 = ptrtoint ptr %gep2 to i64
%sub = sub i64 %cast1, %cast2
- store i8* %gep2, i8** @dummy_global2
+ store ptr %gep2, ptr @dummy_global2
ret i64 %sub
}
ret i8 %r
}
-define <2 x i8> @sub_mask_lowbits_splat_extra_use(<2 x i8> %x, <2 x i8>* %p) {
+define <2 x i8> @sub_mask_lowbits_splat_extra_use(<2 x i8> %x, ptr %p) {
; CHECK-LABEL: @sub_mask_lowbits_splat_extra_use(
; CHECK-NEXT: [[A2:%.*]] = and <2 x i8> [[X:%.*]], <i8 10, i8 10>
-; CHECK-NEXT: store <2 x i8> [[A2]], <2 x i8>* [[P:%.*]], align 2
+; CHECK-NEXT: store <2 x i8> [[A2]], ptr [[P:%.*]], align 2
; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i8> [[X]], <i8 -11, i8 -11>
; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[TMP1]], <i8 -64, i8 -64>
; CHECK-NEXT: ret <2 x i8> [[R]]
;
%a1 = add <2 x i8> %x, <i8 192, i8 192> ; 0xc0
%a2 = and <2 x i8> %x, <i8 10, i8 10> ; 0x0a
- store <2 x i8> %a2, <2 x i8>* %p
+ store <2 x i8> %a2, ptr %p
%r = sub <2 x i8> %a1, %a2
ret <2 x i8> %r
}
; The swifterror value can only be loaded, stored, or used as a swifterror
; argument. Make sure we do not try to turn the function bitcast into an
; argument bitcast.
-define swiftcc void @spam(i32** swifterror %arg) {
+define swiftcc void @spam(ptr swifterror %arg) {
; CHECK-LABEL: @spam(
; CHECK-NEXT: bb:
-; CHECK-NEXT: call swiftcc void bitcast (void (i64**)* @widget to void (i32**)*)(i32** swifterror [[ARG:%.*]])
+; CHECK-NEXT: call swiftcc void @widget(ptr swifterror [[ARG:%.*]])
; CHECK-NEXT: ret void
;
bb:
- call swiftcc void bitcast (void (i64**)* @widget to void (i32**)*)(i32** swifterror %arg)
+ call swiftcc void @widget(ptr swifterror %arg)
ret void
}
-declare swiftcc void @widget(i64**)
+declare swiftcc void @widget(ptr)
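; Sketch of why the fix drops the call-site cast (illustrative): with the
; pointee types erased, @widget's prototype matches the call exactly, so
; the constant-expression cast around the callee can simply vanish. The
; swifterror pointer itself must still flow through unchanged; rewriting
; it via any intermediate cast would produce invalid IR.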
; PR30486
define i32 @single_case() {
; CHECK-LABEL: @single_case(
-; CHECK-NEXT: switch i32 ptrtoint (i32* @g to i32), label %x [
+; CHECK-NEXT: switch i32 ptrtoint (ptr @g to i32), label %x [
; CHECK-NEXT: ]
; CHECK: x:
; CHECK-NEXT: ret i32 0
;
- switch i32 add (i32 ptrtoint (i32* @g to i32), i32 -1), label %x []
+ switch i32 add (i32 ptrtoint (ptr @g to i32), i32 -1), label %x []
x:
ret i32 0
}
define i32 @multiple_cases() {
; CHECK-LABEL: @multiple_cases(
-; CHECK-NEXT: switch i32 ptrtoint (i32* @g to i32), label %x [
+; CHECK-NEXT: switch i32 ptrtoint (ptr @g to i32), label %x [
; CHECK-NEXT: i32 2, label %one
; CHECK-NEXT: i32 3, label %two
; CHECK-NEXT: ]
; CHECK: two:
; CHECK-NEXT: ret i32 2
;
- switch i32 add (i32 ptrtoint (i32* @g to i32), i32 -1), label %x [
+ switch i32 add (i32 ptrtoint (ptr @g to i32), i32 -1), label %x [
i32 1, label %one
i32 2, label %two
]
; CHECK-LABEL: define float @mytan(
; CHECK: ret float %x
-define float @test2(float ()* %fptr) {
+define float @test2(ptr %fptr) {
%call1 = call fast float %fptr()
%tan = call fast float @tanf(float %call1)
ret float %tan
}
; RUN: opt -S -passes=instcombine < %s 2>&1 | FileCheck %s
-define i64 @f(i64* %p1, i64* %p2) {
+define i64 @f(ptr %p1, ptr %p2) {
top:
; check that the !tbaa metadata is preserved
; CHECK-LABEL: @f(
- ; CHECK: %v1 = load i64, i64* %p1, align 8, !tbaa !0
- ; CHECK: store i64 %v1, i64* %p2, align 8
+ ; CHECK: %v1 = load i64, ptr %p1, align 8, !tbaa !0
+ ; CHECK: store i64 %v1, ptr %p2, align 8
; CHECK: ret i64 %v1
- %v1 = load i64, i64* %p1, align 8, !tbaa !0
- store i64 %v1, i64* %p2, align 8
- %v2 = load i64, i64* %p2, align 8
+ %v1 = load i64, ptr %p1, align 8, !tbaa !0
+ store i64 %v1, ptr %p2, align 8
+ %v2 = load i64, ptr %p2, align 8
ret i64 %v2
}
; with said constant. This matters because it could be e.g. a select between
; two constants that happens after the first use of an alloca.
-%t0 = type { i8*, i64 }
+%t0 = type { ptr, i64 }
@g0 = external constant %t0
@g1 = external constant %t0
-define void @test(i8*%out) {
+define void @test(ptr %out) {
; CHECK-LABEL: @test(
; CHECK-NEXT: [[I0:%.*]] = alloca [[T0:%.*]], align 8
-; CHECK-NEXT: [[I1:%.*]] = bitcast %t0* [[I0]] to i8*
; CHECK-NEXT: [[I2:%.*]] = call i1 @get_cond()
-; CHECK-NEXT: [[I3:%.*]] = select i1 [[I2]], i8* bitcast (%t0* @g0 to i8*), i8* bitcast (%t0* @g1 to i8*)
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(16) [[I1]], i8* noundef nonnull align 8 dereferenceable(16) [[I3]], i64 16, i1 false)
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 1 dereferenceable(16) [[OUT:%.*]], i8* noundef nonnull align 8 dereferenceable(16) [[I1]], i64 16, i1 false)
+; CHECK-NEXT: [[I3:%.*]] = select i1 [[I2]], ptr @g0, ptr @g1
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 8 dereferenceable(16) [[I0]], ptr noundef nonnull align 8 dereferenceable(16) [[I3]], i64 16, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 1 dereferenceable(16) [[OUT:%.*]], ptr noundef nonnull align 8 dereferenceable(16) [[I0]], i64 16, i1 false)
; CHECK-NEXT: ret void
;
%i0 = alloca %t0
- %i1 = bitcast %t0* %i0 to i8*
%i2 = call i1 @get_cond()
- %i3 = select i1 %i2, i8* bitcast (%t0* @g0 to i8*), i8* bitcast (%t0* @g1 to i8*)
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %i1, i8* %i3, i64 16, i1 false)
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %out, i8* %i1, i64 16, i1 false)
+ %i3 = select i1 %i2, ptr @g0, ptr @g1
+ call void @llvm.memcpy.p0.p0.i64(ptr %i0, ptr %i3, i64 16, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr %out, ptr %i0, i64 16, i1 false)
ret void
}
; CHECK-LABEL: @test2(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I:%.*]] = alloca [[T0:%.*]], align 8
-; CHECK-NEXT: [[I1:%.*]] = call i32 @func(%t0* undef)
+; CHECK-NEXT: [[I1:%.*]] = call i32 @func(ptr undef)
; CHECK-NEXT: [[I2:%.*]] = icmp eq i32 [[I1]], 2503
-; CHECK-NEXT: [[I3:%.*]] = select i1 [[I2]], i8* bitcast (%t0* @g0 to i8*), i8* bitcast (%t0* @g1 to i8*)
-; CHECK-NEXT: [[I4:%.*]] = bitcast %t0* [[I]] to i8*
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(16) [[I4]], i8* noundef nonnull align 8 dereferenceable(16) [[I3]], i64 16, i1 false)
-; CHECK-NEXT: [[I5:%.*]] = call i32 @func(%t0* nonnull byval([[T0]]) [[I]])
+; CHECK-NEXT: [[I3:%.*]] = select i1 [[I2]], ptr @g0, ptr @g1
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 8 dereferenceable(16) [[I]], ptr noundef nonnull align 8 dereferenceable(16) [[I3]], i64 16, i1 false)
+; CHECK-NEXT: [[I5:%.*]] = call i32 @func(ptr nonnull byval([[T0]]) [[I]])
; CHECK-NEXT: unreachable
;
bb:
%i = alloca %t0, align 8
- %i1 = call i32 @func(%t0* undef)
+ %i1 = call i32 @func(ptr undef)
%i2 = icmp eq i32 %i1, 2503
- %i3 = select i1 %i2, i8* bitcast (%t0* @g0 to i8*), i8* bitcast (%t0* @g1 to i8*)
- %i4 = bitcast %t0* %i to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(16) %i4, i8* noundef nonnull align 8 dereferenceable(16) %i3, i64 16, i1 false)
- %i5 = call i32 @func(%t0* nonnull byval(%t0) %i)
+ %i3 = select i1 %i2, ptr @g0, ptr @g1
+ call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 8 dereferenceable(16) %i, ptr noundef nonnull align 8 dereferenceable(16) %i3, i64 16, i1 false)
+ %i5 = call i32 @func(ptr nonnull byval(%t0) %i)
unreachable
}
-declare i32 @func(%t0*)
+declare i32 @func(ptr)
declare i1 @get_cond()
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)
declare i32 @__CxxFrameHandler3(...)
-define void @test1() personality i32 (...)* @__CxxFrameHandler3 {
+define void @test1() personality ptr @__CxxFrameHandler3 {
bb:
unreachable
; CHECK: %cl = cleanuppad within none []
; CHECK: cleanupret from %cl unwind to caller
-define void @test2(i8 %A, i8 %B) personality i32 (...)* @__CxxFrameHandler3 {
+define void @test2(i8 %A, i8 %B) personality ptr @__CxxFrameHandler3 {
bb:
%X = zext i8 %A to i32
invoke void @g(i32 0)
; CHECK: %Y = zext i8 %B to i32
; CHECK: %phi = phi i32 [ %X, %bb ], [ %Y, %cont ]
-define void @test3(i8 %A, i8 %B) personality i32 (...)* @__CxxFrameHandler3 {
+define void @test3(i8 %A, i8 %B) personality ptr @__CxxFrameHandler3 {
bb:
%X = zext i8 %A to i32
invoke void @g(i32 0)
; CHECK: %phi = phi i32 [ %X, %bb ], [ %Y, %cont ], [ %Y, %cont2 ]
declare void @foo()
-declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64, i32, void ()*, i32, i32, ...)
+declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...)
-define void @test4(i8 addrspace(1)* %obj) gc "statepoint-example" {
+define void @test4(ptr addrspace(1) %obj) gc "statepoint-example" {
bb:
unreachable
unreachable:
- call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @foo, i32 0, i32 0, i32 0, i32 0) ["deopt" (i32 0, i32 -1, i32 0, i32 0, i32 0)]
+ call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @foo, i32 0, i32 0, i32 0, i32 0) ["deopt" (i32 0, i32 -1, i32 0, i32 0, i32 0)]
ret void
}
; CHECK-LABEL: define void @test4(
; CHECK: unreachable:
-; CHECK: call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @foo, i32 0, i32 0, i32 0, i32 0) [ "deopt"(i32 0, i32 -1, i32 0, i32 0, i32 0) ]
+; CHECK: call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @foo, i32 0, i32 0, i32 0, i32 0) [ "deopt"(i32 0, i32 -1, i32 0, i32 0, i32 0) ]
; CHECK: ret void
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
declare void @unknown()
-declare void @f(i8*)
-declare void @f2(i8*, i8*)
-declare i8* @f3(i8*, i8*)
+declare void @f(ptr)
+declare void @f2(ptr, ptr)
+declare ptr @f3(ptr, ptr)
; Basic case for DSEing a trivially dead writing call
define void @test_dead() {
; CHECK-NEXT: ret void
;
%a = alloca i32, align 4
- %bitcast = bitcast i32* %a to i8*
- call void @f(i8* writeonly nocapture %bitcast) argmemonly nounwind willreturn
+ call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
ret void
}
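; Summary sketch of when such a call is removable (from the attributes in
; the test): it may only touch its pointer arguments (argmemonly), cannot
; unwind (nounwind), always returns (willreturn), only writes through a
; nocapture/writeonly argument, and that pointee is a dead alloca. The
; negative tests below each relax one of these conditions.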
; CHECK-NEXT: ret void
;
%a = alloca i32, align 4
- %bitcast = bitcast i32* %a to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* %bitcast)
- call void @f(i8* writeonly nocapture %bitcast) argmemonly nounwind willreturn
- call void @llvm.lifetime.end.p0i8(i64 4, i8* %bitcast)
+ call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
+ call void @llvm.lifetime.end.p0(i64 4, ptr %a)
ret void
}
; CHECK-NEXT: ret void
;
%a = alloca i32, align 4
- %bitcast = bitcast i32* %a to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* %bitcast)
+ call void @llvm.lifetime.start.p0(i64 4, ptr %a)
call void @unknown()
- call void @f(i8* writeonly nocapture %bitcast) argmemonly nounwind willreturn
+ call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
call void @unknown()
- call void @llvm.lifetime.end.p0i8(i64 4, i8* %bitcast)
+ call void @llvm.lifetime.end.p0(i64 4, ptr %a)
ret void
}
; CHECK-NEXT: ret void
;
%a = alloca i32, align 4
- %bitcast = bitcast i32* %a to i8*
- call void @f(i8* nocapture %bitcast) argmemonly nounwind willreturn
+ call void @f(ptr nocapture %a) argmemonly nounwind willreturn
ret void
}
define i32 @test_neg_read_after() {
; CHECK-LABEL: @test_neg_read_after(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[BITCAST:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT: call void @f(i8* nocapture nonnull writeonly [[BITCAST]]) #[[ATTR3:[0-9]+]]
-; CHECK-NEXT: [[RES:%.*]] = load i32, i32* [[A]], align 4
+; CHECK-NEXT: call void @f(ptr nocapture nonnull writeonly [[A]]) #[[ATTR3:[0-9]+]]
+; CHECK-NEXT: [[RES:%.*]] = load i32, ptr [[A]], align 4
; CHECK-NEXT: ret i32 [[RES]]
;
%a = alloca i32, align 4
- %bitcast = bitcast i32* %a to i8*
- call void @f(i8* writeonly nocapture %bitcast) argmemonly nounwind willreturn
- %res = load i32, i32* %a
+ call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
+ %res = load i32, ptr %a
ret i32 %res
}
define void @test_neg_infinite_loop() {
; CHECK-LABEL: @test_neg_infinite_loop(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[BITCAST:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT: call void @f(i8* nocapture nonnull writeonly [[BITCAST]]) #[[ATTR4:[0-9]+]]
+; CHECK-NEXT: call void @f(ptr nocapture nonnull writeonly [[A]]) #[[ATTR4:[0-9]+]]
; CHECK-NEXT: ret void
;
%a = alloca i32, align 4
- %bitcast = bitcast i32* %a to i8*
- call void @f(i8* writeonly nocapture %bitcast) argmemonly nounwind
+ call void @f(ptr writeonly nocapture %a) argmemonly nounwind
ret void
}
define void @test_neg_throw() {
; CHECK-LABEL: @test_neg_throw(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[BITCAST:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT: call void @f(i8* nocapture nonnull writeonly [[BITCAST]]) #[[ATTR5:[0-9]+]]
+; CHECK-NEXT: call void @f(ptr nocapture nonnull writeonly [[A]]) #[[ATTR5:[0-9]+]]
; CHECK-NEXT: ret void
;
%a = alloca i32, align 4
- %bitcast = bitcast i32* %a to i8*
- call void @f(i8* writeonly nocapture %bitcast) argmemonly willreturn
+ call void @f(ptr writeonly nocapture %a) argmemonly willreturn
ret void
}
define void @test_neg_extra_write() {
; CHECK-LABEL: @test_neg_extra_write(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[BITCAST:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT: call void @f(i8* nocapture nonnull writeonly [[BITCAST]]) #[[ATTR6:[0-9]+]]
+; CHECK-NEXT: call void @f(ptr nocapture nonnull writeonly [[A]]) #[[ATTR6:[0-9]+]]
; CHECK-NEXT: ret void
;
%a = alloca i32, align 4
- %bitcast = bitcast i32* %a to i8*
- call void @f(i8* writeonly nocapture %bitcast) nounwind willreturn
+ call void @f(ptr writeonly nocapture %a) nounwind willreturn
ret void
}
; CHECK-LABEL: @test_neg_unmodeled_write(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[A2:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[BITCAST:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT: [[BITCAST2:%.*]] = bitcast i32* [[A2]] to i8*
-; CHECK-NEXT: call void @f2(i8* nocapture nonnull writeonly [[BITCAST]], i8* nonnull [[BITCAST2]]) #[[ATTR3]]
+; CHECK-NEXT: call void @f2(ptr nocapture nonnull writeonly [[A]], ptr nonnull [[A2]]) #[[ATTR3]]
; CHECK-NEXT: ret void
;
%a = alloca i32, align 4
%a2 = alloca i32, align 4
- %bitcast = bitcast i32* %a to i8*
- %bitcast2 = bitcast i32* %a2 to i8*
- call void @f2(i8* nocapture writeonly %bitcast, i8* %bitcast2) argmemonly nounwind willreturn
+ call void @f2(ptr nocapture writeonly %a, ptr %a2) argmemonly nounwind willreturn
ret void
}
define i32 @test_neg_captured_by_call() {
; CHECK-LABEL: @test_neg_captured_by_call(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[A2:%.*]] = alloca i8*, align 8
-; CHECK-NEXT: [[BITCAST:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT: [[BITCAST2:%.*]] = bitcast i8** [[A2]] to i8*
-; CHECK-NEXT: call void @f2(i8* nonnull writeonly [[BITCAST]], i8* nonnull [[BITCAST2]]) #[[ATTR3]]
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8** [[A2]] to i32**
-; CHECK-NEXT: [[A_COPY_CAST1:%.*]] = load i32*, i32** [[TMP1]], align 8
-; CHECK-NEXT: [[RES:%.*]] = load i32, i32* [[A_COPY_CAST1]], align 4
+; CHECK-NEXT: [[A2:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: call void @f2(ptr nonnull writeonly [[A]], ptr nonnull [[A2]]) #[[ATTR3]]
+; CHECK-NEXT: [[A_COPY_CAST1:%.*]] = load ptr, ptr [[A2]], align 8
+; CHECK-NEXT: [[RES:%.*]] = load i32, ptr [[A_COPY_CAST1]], align 4
; CHECK-NEXT: ret i32 [[RES]]
;
%a = alloca i32, align 4
- %a2 = alloca i8*, align 4
- %bitcast = bitcast i32* %a to i8*
- %bitcast2 = bitcast i8** %a2 to i8*
- call void @f2(i8* writeonly %bitcast, i8* %bitcast2) argmemonly nounwind willreturn
- %a_copy_cast = load i8*, i8** %a2
- %a_copy = bitcast i8* %a_copy_cast to i32*
- %res = load i32, i32* %a_copy
+ %a2 = alloca ptr, align 4
+ call void @f2(ptr writeonly %a, ptr %a2) argmemonly nounwind willreturn
+ %a_copy_cast = load ptr, ptr %a2
+ %res = load i32, ptr %a_copy_cast
ret i32 %res
}
define i32 @test_neg_captured_before() {
; CHECK-LABEL: @test_neg_captured_before(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[BITCAST:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT: call void @f(i8* nocapture nonnull writeonly [[BITCAST]]) #[[ATTR3]]
-; CHECK-NEXT: [[RES:%.*]] = load i32, i32* [[A]], align 4
+; CHECK-NEXT: call void @f(ptr nocapture nonnull writeonly [[A]]) #[[ATTR3]]
+; CHECK-NEXT: [[RES:%.*]] = load i32, ptr [[A]], align 4
; CHECK-NEXT: ret i32 [[RES]]
;
%a = alloca i32, align 4
- %a2 = alloca i8*, align 4
- %bitcast = bitcast i32* %a to i8*
- %bitcast2 = bitcast i8** %a2 to i8*
- store i8* %bitcast, i8** %a2
- call void @f(i8* writeonly nocapture %bitcast) argmemonly nounwind willreturn
- %a_copy_cast = load i8*, i8** %a2
- %a_copy = bitcast i8* %a_copy_cast to i32*
- %res = load i32, i32* %a_copy
+ %a2 = alloca ptr, align 4
+ store ptr %a, ptr %a2
+ call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
+ %a_copy_cast = load ptr, ptr %a2
+ %res = load i32, ptr %a_copy_cast
ret i32 %res
}
;
%a = alloca i32, align 4
%a2 = alloca i32, align 4
- %bitcast = bitcast i32* %a to i8*
- %bitcast2 = bitcast i32* %a2 to i8*
- call void @f2(i8* nocapture writeonly %bitcast, i8* nocapture readonly %bitcast2) argmemonly nounwind willreturn
+ call void @f2(ptr nocapture writeonly %a, ptr nocapture readonly %a2) argmemonly nounwind willreturn
ret void
}
;
%a = alloca i32, align 4
%a2 = alloca i32, align 4
- %bitcast = bitcast i32* %a to i8*
- %bitcast2 = bitcast i32* %a2 to i8*
- call i8* @f3(i8* nocapture writeonly %bitcast, i8* readonly %bitcast2) argmemonly nounwind willreturn
+ call ptr @f3(ptr nocapture writeonly %a, ptr readonly %a2) argmemonly nounwind willreturn
ret void
}
-; Cannot remove call, as %bitcast2 is captured via the return value.
+; Cannot remove call, as %a2 is captured via the return value.
define i8 @test_neg_unrelated_capture_used_via_return() {
; CHECK-LABEL: @test_neg_unrelated_capture_used_via_return(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[A2:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[BITCAST:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT: [[BITCAST2:%.*]] = bitcast i32* [[A2]] to i8*
-; CHECK-NEXT: [[CAPTURE:%.*]] = call i8* @f3(i8* nocapture nonnull writeonly [[BITCAST]], i8* nonnull readonly [[BITCAST2]]) #[[ATTR3]]
-; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[CAPTURE]], align 1
+; CHECK-NEXT: [[CAPTURE:%.*]] = call ptr @f3(ptr nocapture nonnull writeonly [[A]], ptr nonnull readonly [[A2]]) #[[ATTR3]]
+; CHECK-NEXT: [[V:%.*]] = load i8, ptr [[CAPTURE]], align 1
; CHECK-NEXT: ret i8 [[V]]
;
%a = alloca i32, align 4
%a2 = alloca i32, align 4
- %bitcast = bitcast i32* %a to i8*
- %bitcast2 = bitcast i32* %a2 to i8*
- %capture = call i8* @f3(i8* nocapture writeonly %bitcast, i8* readonly %bitcast2) argmemonly nounwind willreturn
- %v = load i8, i8* %capture
+ %capture = call ptr @f3(ptr nocapture writeonly %a, ptr readonly %a2) argmemonly nounwind willreturn
+ %v = load i8, ptr %capture
ret i8 %v
}
; CHECK-NEXT: ret void
;
%a = alloca i32, align 4
- %bitcast = bitcast i32* %a to i8*
- call void @f2(i8* nocapture writeonly %bitcast, i8* nocapture readonly %bitcast) argmemonly nounwind willreturn
+ call void @f2(ptr nocapture writeonly %a, ptr nocapture readonly %a) argmemonly nounwind willreturn
ret void
}
ret <2 x i32> %trunc
}
-define void @trunc_shl_31_i32_i64_multi_use(i64 %val, i32 addrspace(1)* %ptr0, i64 addrspace(1)* %ptr1) {
+define void @trunc_shl_31_i32_i64_multi_use(i64 %val, ptr addrspace(1) %ptr0, ptr addrspace(1) %ptr1) {
; CHECK-LABEL: @trunc_shl_31_i32_i64_multi_use(
; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[VAL:%.*]], 31
; CHECK-NEXT: [[TRUNC:%.*]] = trunc i64 [[SHL]] to i32
-; CHECK-NEXT: store volatile i32 [[TRUNC]], i32 addrspace(1)* [[PTR0:%.*]], align 4
-; CHECK-NEXT: store volatile i64 [[SHL]], i64 addrspace(1)* [[PTR1:%.*]], align 8
+; CHECK-NEXT: store volatile i32 [[TRUNC]], ptr addrspace(1) [[PTR0:%.*]], align 4
+; CHECK-NEXT: store volatile i64 [[SHL]], ptr addrspace(1) [[PTR1:%.*]], align 8
; CHECK-NEXT: ret void
;
%shl = shl i64 %val, 31
%trunc = trunc i64 %shl to i32
- store volatile i32 %trunc, i32 addrspace(1)* %ptr0
- store volatile i64 %shl, i64 addrspace(1)* %ptr1
+ store volatile i32 %trunc, ptr addrspace(1) %ptr0
+ store volatile i64 %shl, ptr addrspace(1) %ptr1
ret void
}
; Don't narrow if it would lose information about the dereferenceable range of the pointer.
-define i32 @truncload_no_deref(i64* %ptr) {
+define i32 @truncload_no_deref(ptr %ptr) {
; CHECK-LABEL: @truncload_no_deref(
-; CHECK-NEXT: [[X:%.*]] = load i64, i64* [[PTR:%.*]], align 4
+; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[PTR:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = trunc i64 [[X]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
- %x = load i64, i64* %ptr
+ %x = load i64, ptr %ptr
%r = trunc i64 %x to i32
ret i32 %r
}
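; Reasoning sketch for the dereferenceability tests (illustrative): an
; executed "load i64" itself proves that 8 bytes are dereferenceable,
; while a narrowed "load i32" would only prove 4, silently weakening what
; later passes may assume. Narrowing is therefore reserved for the case
; where a dereferenceable(8) attribute already carries the full guarantee,
; as in @truncload_deref below.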
-define i32 @truncload_small_deref(i64* dereferenceable(7) %ptr) {
+define i32 @truncload_small_deref(ptr dereferenceable(7) %ptr) {
; CHECK-LABEL: @truncload_small_deref(
-; CHECK-NEXT: [[X:%.*]] = load i64, i64* [[PTR:%.*]], align 4
+; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[PTR:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = trunc i64 [[X]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
- %x = load i64, i64* %ptr
+ %x = load i64, ptr %ptr
%r = trunc i64 %x to i32
ret i32 %r
}
; On little-endian, we can narrow the load without an offset.
-define i32 @truncload_deref(i64* dereferenceable(8) %ptr) {
+define i32 @truncload_deref(ptr dereferenceable(8) %ptr) {
; CHECK-LABEL: @truncload_deref(
-; CHECK-NEXT: [[X:%.*]] = load i64, i64* [[PTR:%.*]], align 4
+; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[PTR:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = trunc i64 [[X]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
- %x = load i64, i64* %ptr
+ %x = load i64, ptr %ptr
%r = trunc i64 %x to i32
ret i32 %r
}
; Preserve alignment.
-define i16 @truncload_align(i32* dereferenceable(14) %ptr) {
+define i16 @truncload_align(ptr dereferenceable(14) %ptr) {
; CHECK-LABEL: @truncload_align(
-; CHECK-NEXT: [[X:%.*]] = load i32, i32* [[PTR:%.*]], align 16
+; CHECK-NEXT: [[X:%.*]] = load i32, ptr [[PTR:%.*]], align 16
; CHECK-NEXT: [[R:%.*]] = trunc i32 [[X]] to i16
; CHECK-NEXT: ret i16 [[R]]
;
- %x = load i32, i32* %ptr, align 16
+ %x = load i32, ptr %ptr, align 16
%r = trunc i32 %x to i16
ret i16 %r
}
declare void @use(i64)
-define i32 @truncload_extra_use(i64* dereferenceable(100) %ptr) {
+define i32 @truncload_extra_use(ptr dereferenceable(100) %ptr) {
; CHECK-LABEL: @truncload_extra_use(
-; CHECK-NEXT: [[X:%.*]] = load i64, i64* [[PTR:%.*]], align 2
+; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[PTR:%.*]], align 2
; CHECK-NEXT: call void @use(i64 [[X]])
; CHECK-NEXT: [[R:%.*]] = trunc i64 [[X]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
- %x = load i64, i64* %ptr, align 2
+ %x = load i64, ptr %ptr, align 2
call void @use(i64 %x)
%r = trunc i64 %x to i32
ret i32 %r
}
; Negative test - don't create a load if the type is not allowed by the data-layout.
-define i8 @truncload_type(i64* dereferenceable(9) %ptr) {
+define i8 @truncload_type(ptr dereferenceable(9) %ptr) {
; CHECK-LABEL: @truncload_type(
-; CHECK-NEXT: [[X:%.*]] = load i64, i64* [[PTR:%.*]], align 2
+; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[PTR:%.*]], align 2
; CHECK-NEXT: [[R:%.*]] = trunc i64 [[X]] to i8
; CHECK-NEXT: ret i8 [[R]]
;
- %x = load i64, i64* %ptr, align 2
+ %x = load i64, ptr %ptr, align 2
%r = trunc i64 %x to i8
ret i8 %r
}
; Negative test - don't transform volatiles.
-define i32 @truncload_volatile(i64* dereferenceable(8) %ptr) {
+define i32 @truncload_volatile(ptr dereferenceable(8) %ptr) {
; CHECK-LABEL: @truncload_volatile(
-; CHECK-NEXT: [[X:%.*]] = load volatile i64, i64* [[PTR:%.*]], align 8
+; CHECK-NEXT: [[X:%.*]] = load volatile i64, ptr [[PTR:%.*]], align 8
; CHECK-NEXT: [[R:%.*]] = trunc i64 [[X]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
- %x = load volatile i64, i64* %ptr, align 8
+ %x = load volatile i64, ptr %ptr, align 8
%r = trunc i64 %x to i32
ret i32 %r
}
; Preserve address space.
-define i32 @truncload_address_space(i64 addrspace(1)* dereferenceable(8) %ptr) {
+define i32 @truncload_address_space(ptr addrspace(1) dereferenceable(8) %ptr) {
; CHECK-LABEL: @truncload_address_space(
-; CHECK-NEXT: [[X:%.*]] = load i64, i64 addrspace(1)* [[PTR:%.*]], align 4
+; CHECK-NEXT: [[X:%.*]] = load i64, ptr addrspace(1) [[PTR:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = trunc i64 [[X]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
- %x = load i64, i64 addrspace(1)* %ptr, align 4
+ %x = load i64, ptr addrspace(1) %ptr, align 4
%r = trunc i64 %x to i32
ret i32 %r
}
ret <2 x i32> %trunc
}
-define void @trunc_shl_31_i32_i64_multi_use(i64 %val, i32 addrspace(1)* %ptr0, i64 addrspace(1)* %ptr1) {
+define void @trunc_shl_31_i32_i64_multi_use(i64 %val, ptr addrspace(1) %ptr0, ptr addrspace(1) %ptr1) {
; CHECK-LABEL: @trunc_shl_31_i32_i64_multi_use(
; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[VAL:%.*]], 31
; CHECK-NEXT: [[TRUNC:%.*]] = trunc i64 [[SHL]] to i32
-; CHECK-NEXT: store volatile i32 [[TRUNC]], i32 addrspace(1)* [[PTR0:%.*]], align 4
-; CHECK-NEXT: store volatile i64 [[SHL]], i64 addrspace(1)* [[PTR1:%.*]], align 8
+; CHECK-NEXT: store volatile i32 [[TRUNC]], ptr addrspace(1) [[PTR0:%.*]], align 4
+; CHECK-NEXT: store volatile i64 [[SHL]], ptr addrspace(1) [[PTR1:%.*]], align 8
; CHECK-NEXT: ret void
;
%shl = shl i64 %val, 31
%trunc = trunc i64 %shl to i32
- store volatile i32 %trunc, i32 addrspace(1)* %ptr0
- store volatile i64 %shl, i64 addrspace(1)* %ptr1
+ store volatile i32 %trunc, ptr addrspace(1) %ptr0
+ store volatile i64 %shl, ptr addrspace(1) %ptr1
ret void
}
}
; Type punning to an array of pointers.
-define i32* @type_pun_pointer(<16 x i8> %in) {
+define ptr @type_pun_pointer(<16 x i8> %in) {
; CHECK-LABEL: @type_pun_pointer(
; CHECK-NEXT: [[SROA_BC:%.*]] = bitcast <16 x i8> [[IN:%.*]] to <4 x i32>
; CHECK-NEXT: [[SROA_EXTRACT:%.*]] = extractelement <4 x i32> [[SROA_BC]], i64 0
-; CHECK-NEXT: [[TMP1:%.*]] = inttoptr i32 [[SROA_EXTRACT]] to i32*
-; CHECK-NEXT: ret i32* [[TMP1]]
+; CHECK-NEXT: [[TMP1:%.*]] = inttoptr i32 [[SROA_EXTRACT]] to ptr
+; CHECK-NEXT: ret ptr [[TMP1]]
;
%sroa = shufflevector <16 x i8> %in, <16 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%1 = bitcast <4 x i8> %sroa to i32
- %2 = inttoptr i32 %1 to i32*
- ret i32* %2
+ %2 = inttoptr i32 %1 to ptr
+ ret ptr %2
}
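; Sketch of the canonical form above (taken from the checks): rather than
; materializing an illegal <4 x i8> value, the low four bytes are obtained
; by bitcasting the whole <16 x i8> to <4 x i32> and extracting element 0,
; which feeds the inttoptr directly.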
; Type punning to an array of 32-bit floating-point values.
}
; Type punning to an array of pointers.
-define i32* @type_pun_pointer(<16 x i8> %in) {
+define ptr @type_pun_pointer(<16 x i8> %in) {
; CHECK-LABEL: @type_pun_pointer(
; CHECK-NEXT: [[SROA_BC:%.*]] = bitcast <16 x i8> [[IN:%.*]] to <4 x i32>
; CHECK-NEXT: [[SROA_EXTRACT:%.*]] = extractelement <4 x i32> [[SROA_BC]], i64 0
-; CHECK-NEXT: [[TMP1:%.*]] = inttoptr i32 [[SROA_EXTRACT]] to i32*
-; CHECK-NEXT: ret i32* [[TMP1]]
+; CHECK-NEXT: [[TMP1:%.*]] = inttoptr i32 [[SROA_EXTRACT]] to ptr
+; CHECK-NEXT: ret ptr [[TMP1]]
;
%sroa = shufflevector <16 x i8> %in, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%1 = bitcast <4 x i8> %sroa to i32
- %2 = inttoptr i32 %1 to i32*
- ret i32* %2
+ %2 = inttoptr i32 %1 to ptr
+ ret ptr %2
}
; Type punning to an array of 32-bit floating-point values.
; icmp canonicalization should be consistent for these cases.
; Either the compare depends on the sum or not.
-define i1 @uaddo_1(i8 %x, i8* %p) {
+define i1 @uaddo_1(i8 %x, ptr %p) {
; CHECK-LABEL: @uaddo_1(
; CHECK-NEXT: [[A:%.*]] = add i8 [[X:%.*]], 1
-; CHECK-NEXT: store i8 [[A]], i8* [[P:%.*]], align 1
+; CHECK-NEXT: store i8 [[A]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[A]], 0
; CHECK-NEXT: ret i1 [[C]]
;
%a = add i8 %x, 1
- store i8 %a, i8* %p
+ store i8 %a, ptr %p
%c = icmp ult i8 %a, 1
ret i1 %c
}
-define i1 @uaddo_neg1(i8 %x, i8* %p) {
+define i1 @uaddo_neg1(i8 %x, ptr %p) {
; CHECK-LABEL: @uaddo_neg1(
; CHECK-NEXT: [[A:%.*]] = add i8 [[X:%.*]], -1
-; CHECK-NEXT: store i8 [[A]], i8* [[P:%.*]], align 1
+; CHECK-NEXT: store i8 [[A]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[X]], 0
; CHECK-NEXT: ret i1 [[C]]
;
%a = add i8 %x, -1
- store i8 %a, i8* %p
+ store i8 %a, ptr %p
%c = icmp ne i8 %a, -1
ret i1 %c
}
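; Worked equivalences behind the two tests above (unsigned arithmetic):
;   x + 1 <u 1   holds iff  x + 1 == 0, since only 0 is unsigned-below 1;
;   x - 1 != -1  holds iff  x != 0, since only 0 - 1 wraps to -1.
; Both overflow checks therefore canonicalize to a compare against zero.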
define i32 @PR30366(i1 %a) {
; CHECK-LABEL: @PR30366(
; CHECK-NEXT: [[Z:%.*]] = zext i1 [[A:%.*]] to i32
-; CHECK-NEXT: [[D:%.*]] = lshr i32 [[Z]], zext (i16 ptrtoint ([1 x i16]* @b to i16) to i32)
+; CHECK-NEXT: [[D:%.*]] = lshr i32 [[Z]], zext (i16 ptrtoint (ptr @b to i16) to i32)
; CHECK-NEXT: ret i32 [[D]]
;
%z = zext i1 %a to i32
- %d = udiv i32 %z, zext (i16 shl (i16 1, i16 ptrtoint ([1 x i16]* @b to i16)) to i32)
+ %d = udiv i32 %z, zext (i16 shl (i16 1, i16 ptrtoint (ptr @b to i16)) to i32)
ret i32 %d
}
%B22 = add i177 %B9, %B13
%B1 = udiv i177 %B5, %B6
%C9 = icmp ult i177 %Y, %B22
- store i1 %C9, i1* undef
+ store i1 %C9, ptr undef
ret i177 %B1
}
define i32 @udiv_constexpr(i8 %a) {
; CHECK-LABEL: @udiv_constexpr(
-; CHECK-NEXT: [[TMP1:%.*]] = udiv i8 [[A:%.*]], ptrtoint ([1 x i8]* @b to i8)
+; CHECK-NEXT: [[TMP1:%.*]] = udiv i8 [[A:%.*]], ptrtoint (ptr @b to i8)
; CHECK-NEXT: [[D:%.*]] = zext i8 [[TMP1]] to i32
; CHECK-NEXT: ret i32 [[D]]
;
%za = zext i8 %a to i32
- %d = udiv i32 %za, zext (i8 ptrtoint ([1 x i8]* @b to i8) to i32)
+ %d = udiv i32 %za, zext (i8 ptrtoint (ptr @b to i8) to i32)
ret i32 %d
}
define i32 @udiv_const_constexpr(i8 %a) {
; CHECK-LABEL: @udiv_const_constexpr(
-; CHECK-NEXT: [[D:%.*]] = udiv i32 42, zext (i8 ptrtoint ([1 x i8]* @g1 to i8) to i32)
+; CHECK-NEXT: [[D:%.*]] = udiv i32 42, zext (i8 ptrtoint (ptr @g1 to i8) to i32)
; CHECK-NEXT: ret i32 [[D]]
;
- %d = udiv i32 42, zext (i8 ptrtoint ([1 x i8]* @g1 to i8) to i32)
+ %d = udiv i32 42, zext (i8 ptrtoint (ptr @g1 to i8) to i32)
ret i32 %d
}
define i32 @urem_const_constexpr(i8 %a) {
; CHECK-LABEL: @urem_const_constexpr(
-; CHECK-NEXT: [[D:%.*]] = urem i32 42, zext (i8 ptrtoint ([1 x i8]* @g2 to i8) to i32)
+; CHECK-NEXT: [[D:%.*]] = urem i32 42, zext (i8 ptrtoint (ptr @g2 to i8) to i32)
; CHECK-NEXT: ret i32 [[D]]
;
- %d = urem i32 42, zext (i8 ptrtoint ([1 x i8]* @g2 to i8) to i32)
+ %d = urem i32 42, zext (i8 ptrtoint (ptr @g2 to i8) to i32)
ret i32 %d
}
define i32 @udiv_constexpr_const(i8 %a) {
; CHECK-LABEL: @udiv_constexpr_const(
-; CHECK-NEXT: [[D:%.*]] = udiv i32 zext (i8 ptrtoint ([1 x i8]* @g3 to i8) to i32), 42
+; CHECK-NEXT: [[D:%.*]] = udiv i32 zext (i8 ptrtoint (ptr @g3 to i8) to i32), 42
; CHECK-NEXT: ret i32 [[D]]
;
- %d = udiv i32 zext (i8 ptrtoint ([1 x i8]* @g3 to i8) to i32), 42
+ %d = udiv i32 zext (i8 ptrtoint (ptr @g3 to i8) to i32), 42
ret i32 %d
}
define i32 @urem_constexpr_const(i8 %a) {
; CHECK-LABEL: @urem_constexpr_const(
-; CHECK-NEXT: [[D:%.*]] = urem i32 zext (i8 ptrtoint ([1 x i8]* @g4 to i8) to i32), 42
+; CHECK-NEXT: [[D:%.*]] = urem i32 zext (i8 ptrtoint (ptr @g4 to i8) to i32), 42
; CHECK-NEXT: ret i32 [[D]]
;
- %d = urem i32 zext (i8 ptrtoint ([1 x i8]* @g4 to i8) to i32), 42
+ %d = urem i32 zext (i8 ptrtoint (ptr @g4 to i8) to i32), 42
ret i32 %d
}
declare { i64, i1 } @llvm.umul.with.overflow.i64(i64, i64) #0
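; For the tests below: overflow | (mul != 0) is equivalent to
; (a != 0) & (b != 0), because a product is nonzero or overflows exactly
; when both multiplicands are nonzero. The tests then probe which extra
; uses of the intrinsic's results block the rewrite.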
-define i1 @test1(i64 %a, i64 %b, i64* %ptr) {
+define i1 @test1(i64 %a, i64 %b, ptr %ptr) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i64 [[A]], 0
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[B]], 0
; CHECK-NEXT: [[OVERFLOW_1:%.*]] = and i1 [[TMP1]], [[TMP2]]
-; CHECK-NEXT: store i64 [[MUL]], i64* [[PTR:%.*]], align 8
+; CHECK-NEXT: store i64 [[MUL]], ptr [[PTR:%.*]], align 8
; CHECK-NEXT: ret i1 [[OVERFLOW_1]]
;
%mul = extractvalue { i64, i1 } %res, 0
%cmp = icmp ne i64 %mul, 0
%overflow.1 = or i1 %overflow, %cmp
- store i64 %mul, i64* %ptr, align 8
+ store i64 %mul, ptr %ptr, align 8
ret i1 %overflow.1
}
-define i1 @test1_logical(i64 %a, i64 %b, i64* %ptr) {
+define i1 @test1_logical(i64 %a, i64 %b, ptr %ptr) {
; CHECK-LABEL: @test1_logical(
; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i64 [[A]], 0
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[B]], 0
; CHECK-NEXT: [[OVERFLOW_1:%.*]] = and i1 [[TMP1]], [[TMP2]]
-; CHECK-NEXT: store i64 [[MUL]], i64* [[PTR:%.*]], align 8
+; CHECK-NEXT: store i64 [[MUL]], ptr [[PTR:%.*]], align 8
; CHECK-NEXT: ret i1 [[OVERFLOW_1]]
;
%mul = extractvalue { i64, i1 } %res, 0
%cmp = icmp ne i64 %mul, 0
%overflow.1 = select i1 %overflow, i1 true, i1 %cmp
- store i64 %mul, i64* %ptr, align 8
+ store i64 %mul, ptr %ptr, align 8
ret i1 %overflow.1
}
-define i1 @test1_or_ops_swapped(i64 %a, i64 %b, i64* %ptr) {
+define i1 @test1_or_ops_swapped(i64 %a, i64 %b, ptr %ptr) {
; CHECK-LABEL: @test1_or_ops_swapped(
; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i64 [[A]], 0
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[B]], 0
; CHECK-NEXT: [[OVERFLOW_1:%.*]] = and i1 [[TMP1]], [[TMP2]]
-; CHECK-NEXT: store i64 [[MUL]], i64* [[PTR:%.*]], align 8
+; CHECK-NEXT: store i64 [[MUL]], ptr [[PTR:%.*]], align 8
; CHECK-NEXT: ret i1 [[OVERFLOW_1]]
;
%mul = extractvalue { i64, i1 } %res, 0
%cmp = icmp ne i64 %mul, 0
%overflow.1 = or i1 %cmp, %overflow
- store i64 %mul, i64* %ptr, align 8
+ store i64 %mul, ptr %ptr, align 8
ret i1 %overflow.1
}
-define i1 @test1_or_ops_swapped_logical(i64 %a, i64 %b, i64* %ptr) {
+define i1 @test1_or_ops_swapped_logical(i64 %a, i64 %b, ptr %ptr) {
; CHECK-LABEL: @test1_or_ops_swapped_logical(
; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i64 [[A]], 0
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[B]], 0
; CHECK-NEXT: [[OVERFLOW_1:%.*]] = and i1 [[TMP1]], [[TMP2]]
-; CHECK-NEXT: store i64 [[MUL]], i64* [[PTR:%.*]], align 8
+; CHECK-NEXT: store i64 [[MUL]], ptr [[PTR:%.*]], align 8
; CHECK-NEXT: ret i1 [[OVERFLOW_1]]
;
%mul = extractvalue { i64, i1 } %res, 0
%cmp = icmp ne i64 %mul, 0
%overflow.1 = select i1 %cmp, i1 true, i1 %overflow
- store i64 %mul, i64* %ptr, align 8
+ store i64 %mul, ptr %ptr, align 8
ret i1 %overflow.1
}
-define i1 @test2(i64 %a, i64 %b, i64* %ptr) {
+define i1 @test2(i64 %a, i64 %b, ptr %ptr) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i64 [[A]], 0
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[B]], 0
; CHECK-NEXT: [[OVERFLOW_1:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[NEG:%.*]] = sub i64 0, [[MUL]]
-; CHECK-NEXT: store i64 [[NEG]], i64* [[PTR:%.*]], align 8
+; CHECK-NEXT: store i64 [[NEG]], ptr [[PTR:%.*]], align 8
; CHECK-NEXT: ret i1 [[OVERFLOW_1]]
;
%cmp = icmp ne i64 %mul, 0
%overflow.1 = or i1 %overflow, %cmp
%neg = sub i64 0, %mul
- store i64 %neg, i64* %ptr, align 8
+ store i64 %neg, ptr %ptr, align 8
ret i1 %overflow.1
}
-define i1 @test2_logical(i64 %a, i64 %b, i64* %ptr) {
+define i1 @test2_logical(i64 %a, i64 %b, ptr %ptr) {
; CHECK-LABEL: @test2_logical(
; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i64 [[A]], 0
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[B]], 0
; CHECK-NEXT: [[OVERFLOW_1:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[NEG:%.*]] = sub i64 0, [[MUL]]
-; CHECK-NEXT: store i64 [[NEG]], i64* [[PTR:%.*]], align 8
+; CHECK-NEXT: store i64 [[NEG]], ptr [[PTR:%.*]], align 8
; CHECK-NEXT: ret i1 [[OVERFLOW_1]]
;
%cmp = icmp ne i64 %mul, 0
%overflow.1 = select i1 %overflow, i1 true, i1 %cmp
%neg = sub i64 0, %mul
- store i64 %neg, i64* %ptr, align 8
+ store i64 %neg, ptr %ptr, align 8
ret i1 %overflow.1
}
declare void @use(i1)
-define i1 @test3_multiple_overflow_users(i64 %a, i64 %b, i64* %ptr) {
+define i1 @test3_multiple_overflow_users(i64 %a, i64 %b, ptr %ptr) {
; CHECK-LABEL: @test3_multiple_overflow_users(
; CHECK-NEXT: [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
; CHECK-NEXT: [[OVERFLOW:%.*]] = extractvalue { i64, i1 } [[RES]], 1
ret i1 %overflow.1
}
-define i1 @test3_multiple_overflow_users_logical(i64 %a, i64 %b, i64* %ptr) {
+define i1 @test3_multiple_overflow_users_logical(i64 %a, i64 %b, ptr %ptr) {
; CHECK-LABEL: @test3_multiple_overflow_users_logical(
; CHECK-NEXT: [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
; CHECK-NEXT: [[OVERFLOW:%.*]] = extractvalue { i64, i1 } [[RES]], 1
}
; Do not simplify if %overflow and %mul have multiple uses.
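; (If %overflow is needed elsewhere, the intrinsic must stay, and the
; rewrite would merely add a plain mul and icmps alongside it, so it is
; presumably not worthwhile.)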
-define i1 @test3_multiple_overflow_and_mul_users(i64 %a, i64 %b, i64* %ptr) {
+define i1 @test3_multiple_overflow_and_mul_users(i64 %a, i64 %b, ptr %ptr) {
; CHECK-LABEL: @test3_multiple_overflow_and_mul_users(
; CHECK-NEXT: [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
; CHECK-NEXT: [[OVERFLOW:%.*]] = extractvalue { i64, i1 } [[RES]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[MUL]], 0
; CHECK-NEXT: [[OVERFLOW_1:%.*]] = or i1 [[OVERFLOW]], [[CMP]]
; CHECK-NEXT: [[NEG:%.*]] = sub i64 0, [[MUL]]
-; CHECK-NEXT: store i64 [[NEG]], i64* [[PTR:%.*]], align 8
+; CHECK-NEXT: store i64 [[NEG]], ptr [[PTR:%.*]], align 8
; CHECK-NEXT: call void @use(i1 [[OVERFLOW]])
; CHECK-NEXT: ret i1 [[OVERFLOW_1]]
;
%cmp = icmp ne i64 %mul, 0
%overflow.1 = or i1 %overflow, %cmp
%neg = sub i64 0, %mul
- store i64 %neg, i64* %ptr, align 8
+ store i64 %neg, ptr %ptr, align 8
call void @use(i1 %overflow)
ret i1 %overflow.1
}
-define i1 @test3_multiple_overflow_and_mul_users_logical(i64 %a, i64 %b, i64* %ptr) {
+define i1 @test3_multiple_overflow_and_mul_users_logical(i64 %a, i64 %b, ptr %ptr) {
; CHECK-LABEL: @test3_multiple_overflow_and_mul_users_logical(
; CHECK-NEXT: [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
; CHECK-NEXT: [[OVERFLOW:%.*]] = extractvalue { i64, i1 } [[RES]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[MUL]], 0
; CHECK-NEXT: [[OVERFLOW_1:%.*]] = or i1 [[OVERFLOW]], [[CMP]]
; CHECK-NEXT: [[NEG:%.*]] = sub i64 0, [[MUL]]
-; CHECK-NEXT: store i64 [[NEG]], i64* [[PTR:%.*]], align 8
+; CHECK-NEXT: store i64 [[NEG]], ptr [[PTR:%.*]], align 8
; CHECK-NEXT: call void @use(i1 [[OVERFLOW]])
; CHECK-NEXT: ret i1 [[OVERFLOW_1]]
;
%cmp = icmp ne i64 %mul, 0
%overflow.1 = select i1 %overflow, i1 true, i1 %cmp
%neg = sub i64 0, %mul
- store i64 %neg, i64* %ptr, align 8
+ store i64 %neg, ptr %ptr, align 8
call void @use(i1 %overflow)
ret i1 %overflow.1
}
declare void @use.2({ i64, i1 })
-define i1 @test3_multiple_res_users(i64 %a, i64 %b, i64* %ptr) {
+define i1 @test3_multiple_res_users(i64 %a, i64 %b, ptr %ptr) {
; CHECK-LABEL: @test3_multiple_res_users(
; CHECK-NEXT: [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
; CHECK-NEXT: [[MUL:%.*]] = extractvalue { i64, i1 } [[RES]], 0
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[B]], 0
; CHECK-NEXT: [[OVERFLOW_1:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[NEG:%.*]] = sub i64 0, [[MUL]]
-; CHECK-NEXT: store i64 [[NEG]], i64* [[PTR:%.*]], align 8
+; CHECK-NEXT: store i64 [[NEG]], ptr [[PTR:%.*]], align 8
; CHECK-NEXT: call void @use.2({ i64, i1 } [[RES]])
; CHECK-NEXT: ret i1 [[OVERFLOW_1]]
;
%cmp = icmp ne i64 %mul, 0
%overflow.1 = or i1 %overflow, %cmp
%neg = sub i64 0, %mul
- store i64 %neg, i64* %ptr, align 8
+ store i64 %neg, ptr %ptr, align 8
call void @use.2({ i64, i1 } %res)
ret i1 %overflow.1
}
-define i1 @test3_multiple_res_users_logical(i64 %a, i64 %b, i64* %ptr) {
+define i1 @test3_multiple_res_users_logical(i64 %a, i64 %b, ptr %ptr) {
; CHECK-LABEL: @test3_multiple_res_users_logical(
; CHECK-NEXT: [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
; CHECK-NEXT: [[MUL:%.*]] = extractvalue { i64, i1 } [[RES]], 0
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[B]], 0
; CHECK-NEXT: [[OVERFLOW_1:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[NEG:%.*]] = sub i64 0, [[MUL]]
-; CHECK-NEXT: store i64 [[NEG]], i64* [[PTR:%.*]], align 8
+; CHECK-NEXT: store i64 [[NEG]], ptr [[PTR:%.*]], align 8
; CHECK-NEXT: call void @use.2({ i64, i1 } [[RES]])
; CHECK-NEXT: ret i1 [[OVERFLOW_1]]
;
%cmp = icmp ne i64 %mul, 0
%overflow.1 = select i1 %overflow, i1 true, i1 %cmp
%neg = sub i64 0, %mul
- store i64 %neg, i64* %ptr, align 8
+ store i64 %neg, ptr %ptr, align 8
call void @use.2({ i64, i1 } %res)
ret i1 %overflow.1
}
declare void @use.3(i64)
; Simplify if %mul has multiple uses.
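; (The plain mul produced by the rewrite can also serve %mul's extra use,
; so nothing is duplicated in this case.)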
-define i1 @test3_multiple_mul_users(i64 %a, i64 %b, i64* %ptr) {
+define i1 @test3_multiple_mul_users(i64 %a, i64 %b, ptr %ptr) {
; CHECK-LABEL: @test3_multiple_mul_users(
; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i64 [[A]], 0
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[B]], 0
; CHECK-NEXT: [[OVERFLOW_1:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[NEG:%.*]] = sub i64 0, [[MUL]]
-; CHECK-NEXT: store i64 [[NEG]], i64* [[PTR:%.*]], align 8
+; CHECK-NEXT: store i64 [[NEG]], ptr [[PTR:%.*]], align 8
; CHECK-NEXT: call void @use.3(i64 [[MUL]])
; CHECK-NEXT: ret i1 [[OVERFLOW_1]]
;
%cmp = icmp ne i64 %mul, 0
%overflow.1 = or i1 %overflow, %cmp
%neg = sub i64 0, %mul
- store i64 %neg, i64* %ptr, align 8
+ store i64 %neg, ptr %ptr, align 8
call void @use.3(i64 %mul)
ret i1 %overflow.1
}
-define i1 @test3_multiple_mul_users_logical(i64 %a, i64 %b, i64* %ptr) {
+define i1 @test3_multiple_mul_users_logical(i64 %a, i64 %b, ptr %ptr) {
; CHECK-LABEL: @test3_multiple_mul_users_logical(
; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i64 [[A]], 0
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[B]], 0
; CHECK-NEXT: [[OVERFLOW_1:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[NEG:%.*]] = sub i64 0, [[MUL]]
-; CHECK-NEXT: store i64 [[NEG]], i64* [[PTR:%.*]], align 8
+; CHECK-NEXT: store i64 [[NEG]], ptr [[PTR:%.*]], align 8
; CHECK-NEXT: call void @use.3(i64 [[MUL]])
; CHECK-NEXT: ret i1 [[OVERFLOW_1]]
;
%cmp = icmp ne i64 %mul, 0
%overflow.1 = select i1 %overflow, i1 true, i1 %cmp
%neg = sub i64 0, %mul
- store i64 %neg, i64* %ptr, align 8
+ store i64 %neg, ptr %ptr, align 8
call void @use.3(i64 %mul)
ret i1 %overflow.1
}
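; With a signed predicate (sgt) instead of eq/ne, the equivalence above no
; longer applies, so the intrinsic call is expected to remain.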
-define i1 @test4_no_icmp_ne(i64 %a, i64 %b, i64* %ptr) {
+define i1 @test4_no_icmp_ne(i64 %a, i64 %b, ptr %ptr) {
; CHECK-LABEL: @test4_no_icmp_ne(
; CHECK-NEXT: [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
; CHECK-NEXT: [[OVERFLOW:%.*]] = extractvalue { i64, i1 } [[RES]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[MUL]], 0
; CHECK-NEXT: [[OVERFLOW_1:%.*]] = or i1 [[OVERFLOW]], [[CMP]]
; CHECK-NEXT: [[NEG:%.*]] = sub i64 0, [[MUL]]
-; CHECK-NEXT: store i64 [[NEG]], i64* [[PTR:%.*]], align 8
+; CHECK-NEXT: store i64 [[NEG]], ptr [[PTR:%.*]], align 8
; CHECK-NEXT: ret i1 [[OVERFLOW_1]]
;
%res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
%cmp = icmp sgt i64 %mul, 0
%overflow.1 = or i1 %overflow, %cmp
%neg = sub i64 0, %mul
- store i64 %neg, i64* %ptr, align 8
+ store i64 %neg, ptr %ptr, align 8
ret i1 %overflow.1
}
-define i1 @test4_no_icmp_ne_logical(i64 %a, i64 %b, i64* %ptr) {
+define i1 @test4_no_icmp_ne_logical(i64 %a, i64 %b, ptr %ptr) {
; CHECK-LABEL: @test4_no_icmp_ne_logical(
; CHECK-NEXT: [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
; CHECK-NEXT: [[OVERFLOW:%.*]] = extractvalue { i64, i1 } [[RES]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[MUL]], 0
; CHECK-NEXT: [[OVERFLOW_1:%.*]] = or i1 [[OVERFLOW]], [[CMP]]
; CHECK-NEXT: [[NEG:%.*]] = sub i64 0, [[MUL]]
-; CHECK-NEXT: store i64 [[NEG]], i64* [[PTR:%.*]], align 8
+; CHECK-NEXT: store i64 [[NEG]], ptr [[PTR:%.*]], align 8
; CHECK-NEXT: ret i1 [[OVERFLOW_1]]
;
%res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
%cmp = icmp sgt i64 %mul, 0
%overflow.1 = select i1 %overflow, i1 true, i1 %cmp
%neg = sub i64 0, %mul
- store i64 %neg, i64* %ptr, align 8
+ store i64 %neg, ptr %ptr, align 8
ret i1 %overflow.1
}
define i32 @main() local_unnamed_addr #0 !dbg !26 {
entry:
- %0 = load i8, i8* @e, align 1, !dbg !31, !tbaa !32
+ %0 = load i8, ptr @e, align 1, !dbg !31, !tbaa !32
%conv = sext i8 %0 to i32, !dbg !31
- store i32 %conv, i32* @c, align 4, !dbg !35, !tbaa !36
+ store i32 %conv, ptr @c, align 4, !dbg !35, !tbaa !36
call void @llvm.dbg.value(metadata i32 -1372423381, metadata !30, metadata !DIExpression()), !dbg !38
%call = call signext i8 @b(i32 6), !dbg !39
%conv1 = sext i8 %call to i32, !dbg !39
call void @llvm.dbg.value(metadata i32 %conv1, metadata !30, metadata !DIExpression()), !dbg !38
- %1 = load i32, i32* @d, align 4, !dbg !40, !tbaa !36
+ %1 = load i32, ptr @d, align 4, !dbg !40, !tbaa !36
%call2 = call i32 (...) @optimize_me_not(), !dbg !41
ret i32 0, !dbg !42
}
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
; Fold
; ((%x * %y) u/ %x) == %y
; to
; @llvm.umul.with.overflow(%x, %y) + extractvalue + not
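; A minimal sketch of the folded form (value names here are illustrative,
; not taken from the tests):
;   %res = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
;   %ov  = extractvalue { i32, i1 } %res, 1
;   %r   = xor i1 %ov, true   ; the trailing "not": == holds iff no overflow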
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
; Fold
; ((%x * %y) u/ %x) != %y
; to
; @llvm.umul.with.overflow(%x, %y) + extractvalue
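; A minimal sketch of the folded form (illustrative value names):
;   %res = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
;   %r   = extractvalue { i32, i1 } %res, 1   ; != holds iff overflow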
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
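; As the CHECK lines below show, @main is expected to collapse to a single
; select on %argc: the call to @compute (which just returns its i32
; argument) and the pointer null checks all simplify away.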
-define i32 @main(i32 %argc, i8** %argv) #0 {
+define i32 @main(i32 %argc, ptr %argv) #0 {
; CHECK-LABEL: define {{[^@]+}}@main
-; CHECK-SAME: (i32 [[ARGC:%.*]], i8** nocapture readonly [[ARGV:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: (i32 [[ARGC:%.*]], ptr nocapture readonly [[ARGV:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = icmp slt i32 [[ARGC]], 2
; CHECK-NEXT: [[SPEC_SELECT:%.*]] = select i1 [[TMP0]], i32 0, i32 [[ARGC]]
; CHECK-NEXT: ret i32 [[SPEC_SELECT]]
;
entry:
- %0 = getelementptr inbounds i8*, i8** %argv, i32 0
- %ptr = load i8*, i8** %0
- %1 = call i32 @compute(i8* %ptr, i32 %argc)
- %2 = icmp slt i32 %argc, 2
- br i1 %2, label %done, label %do_work
+ %ptr = load ptr, ptr %argv
+ %0 = call i32 @compute(ptr %ptr, i32 %argc)
+ %1 = icmp slt i32 %argc, 2
+ br i1 %1, label %done, label %do_work
do_work:
- %3 = icmp eq i8* %ptr, null
- br i1 %3, label %null, label %done
+ %2 = icmp eq ptr %ptr, null
+ br i1 %2, label %null, label %done
null:
- call void @call_if_null(i8* %ptr)
+ call void @call_if_null(ptr %ptr)
br label %done
done:
- %retval = phi i32 [0, %entry], [%1, %do_work], [%1, %null]
+ %retval = phi i32 [0, %entry], [%0, %do_work], [%0, %null]
ret i32 %retval
}
-define i32 @compute(i8* noundef nonnull %ptr, i32 %x) #1 {
+define i32 @compute(ptr noundef nonnull %ptr, i32 %x) #1 {
; CHECK-LABEL: define {{[^@]+}}@compute
-; CHECK-SAME: (i8* nocapture noundef nonnull readnone [[PTR:%.*]], i32 returned [[X:%.*]])
+; CHECK-SAME: (ptr nocapture noundef nonnull readnone [[PTR:%.*]], i32 returned [[X:%.*]])
; CHECK-SAME: local_unnamed_addr #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: ret i32 [[X]]
;
ret i32 %x
}
-declare void @call_if_null(i8* %ptr) #0
+declare void @call_if_null(ptr %ptr) #0
attributes #0 = { nounwind }
attributes #1 = { noinline nounwind readonly }
unreachable
}
-define dso_local void @test() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define dso_local void @test() personality ptr @__gxx_personality_v0 {
entry:
; CHECK: define dso_local void @test()
; CHECK-NEXT: entry:
ret void
lpad:
-; CHECK: %0 = landingpad { i8*, i32 }
-; CHECK: resume { i8*, i32 } %0
+; CHECK: %0 = landingpad { ptr, i32 }
+; CHECK: resume { ptr, i32 } %0
- %0 = landingpad { i8*, i32 }
+ %0 = landingpad { ptr, i32 }
cleanup
- call void (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.2, i64 0, i64 0))
- resume { i8*, i32 } %0
+ call void (ptr, ...) @printf(ptr @.str.2)
+ resume { ptr, i32 } %0
}
declare dso_local i32 @__gxx_personality_v0(...)
-declare dso_local void @printf(i8*, ...)
+declare dso_local void @printf(ptr, ...)
br label %bb15
bb15:
- %iftmp.0.0 = phi i8* [ getelementptr ([5 x i8], [5 x i8]* @.str1, i32 0, i32 0), %bb14 ], [ getelementptr ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), %bb ]
- %tmp17 = call i32 (i8*, ...) @printf(i8* %iftmp.0.0) nounwind
+ %iftmp.0.0 = phi ptr [ @.str1, %bb14 ], [ @.str, %bb ]
+ %tmp17 = call i32 (ptr, ...) @printf(ptr %iftmp.0.0) nounwind
ret i32 0
}
declare i32 @func_11()
-declare i32 @printf(i8*, ...) nounwind
+declare i32 @printf(ptr, ...) nounwind
; RUN: opt < %s -passes=instcombine -instcombine-infinite-loop-threshold=3 -S | FileCheck %s
-%struct.__va_list = type { i8*, i8*, i8*, i32, i32 }
+%struct.__va_list = type { ptr, ptr, ptr, i32, i32 }
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
-declare void @llvm.va_start(i8*)
-declare void @llvm.va_end(i8*)
-declare void @llvm.va_copy(i8*, i8*)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_end(ptr)
+declare void @llvm.va_copy(ptr, ptr)
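; Nothing ever reads from the two va_lists below, so the whole
; start/copy/end sequence (lifetime markers included) is dead and the
; function should fold to just 'ret i32 0'.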
-define i32 @func(i8* nocapture readnone %fmt, ...) {
+define i32 @func(ptr nocapture readnone %fmt, ...) {
; CHECK-LABEL: @func(
; CHECK: entry:
; CHECK-NEXT: ret i32 0
entry:
%va0 = alloca %struct.__va_list, align 8
%va1 = alloca %struct.__va_list, align 8
- %0 = bitcast %struct.__va_list* %va0 to i8*
- %1 = bitcast %struct.__va_list* %va1 to i8*
- call void @llvm.lifetime.start.p0i8(i64 32, i8* %0)
- call void @llvm.va_start(i8* %0)
- call void @llvm.lifetime.start.p0i8(i64 32, i8* %1)
- call void @llvm.va_copy(i8* %1, i8* %0)
- call void @llvm.va_end(i8* %1)
- call void @llvm.lifetime.end.p0i8(i64 32, i8* %1)
- call void @llvm.va_end(i8* %0)
- call void @llvm.lifetime.end.p0i8(i64 32, i8* %0)
+ call void @llvm.lifetime.start.p0(i64 32, ptr %va0)
+ call void @llvm.va_start(ptr %va0)
+ call void @llvm.lifetime.start.p0(i64 32, ptr %va1)
+ call void @llvm.va_copy(ptr %va1, ptr %va0)
+ call void @llvm.va_end(ptr %va1)
+ call void @llvm.lifetime.end.p0(i64 32, ptr %va1)
+ call void @llvm.va_end(ptr %va0)
+ call void @llvm.lifetime.end.p0(i64 32, ptr %va0)
ret i32 0
}
define void @get_image() nounwind {
; CHECK-LABEL: @get_image(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i32 @fgetc(i8* null) #[[ATTR0:[0-9]+]]
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @fgetc(ptr null) #[[ATTR0:[0-9]+]]
; CHECK-NEXT: br i1 false, label [[BB2:%.*]], label [[BB3:%.*]]
; CHECK: bb2:
; CHECK-NEXT: br label [[BB3]]
; CHECK-NEXT: unreachable
;
entry:
- %0 = call i32 @fgetc(i8* null) nounwind
+ %0 = call i32 @fgetc(ptr null) nounwind
%1 = trunc i32 %0 to i8
%t2 = insertelement <100 x i8> zeroinitializer, i8 %1, i32 1
%t1 = extractelement <100 x i8> %t2, i32 0
}
; PR4340
-define void @vac(<4 x float>* nocapture %a) nounwind {
+define void @vac(ptr nocapture %a) nounwind {
; CHECK-LABEL: @vac(
; CHECK-NEXT: entry:
-; CHECK-NEXT: store <4 x float> zeroinitializer, <4 x float>* [[A:%.*]], align 16
+; CHECK-NEXT: store <4 x float> zeroinitializer, ptr [[A:%.*]], align 16
; CHECK-NEXT: ret void
;
entry:
- %t1 = load <4 x float>, <4 x float>* %a ; <<4 x float>> [#uses=1]
+ %t1 = load <4 x float>, ptr %a ; <<4 x float>> [#uses=1]
%vecins = insertelement <4 x float> %t1, float 0.000000e+00, i32 0 ; <<4 x float>> [#uses=1]
%vecins4 = insertelement <4 x float> %vecins, float 0.000000e+00, i32 1; <<4 x float>> [#uses=1]
%vecins6 = insertelement <4 x float> %vecins4, float 0.000000e+00, i32 2; <<4 x float>> [#uses=1]
%vecins8 = insertelement <4 x float> %vecins6, float 0.000000e+00, i32 3; <<4 x float>> [#uses=1]
- store <4 x float> %vecins8, <4 x float>* %a
+ store <4 x float> %vecins8, ptr %a
ret void
}
-declare i32 @fgetc(i8*)
+declare i32 @fgetc(ptr)
define <4 x float> @dead_shuffle_elt(<4 x float> %x, <2 x float> %y) nounwind {
; CHECK-LABEL: @dead_shuffle_elt(
ret <3 x float> %r
}
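; The tests below exercise extractelement-of-vector-GEP: when only one lane
; is demanded, the vector GEP can usually be rewritten as a scalar GEP of
; the extracted base and/or index lane.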
-define i32* @gep_vbase_w_s_idx(<2 x i32*> %base, i64 %index) {
+define ptr @gep_vbase_w_s_idx(<2 x ptr> %base, i64 %index) {
; CHECK-LABEL: @gep_vbase_w_s_idx(
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x i32*> [[BASE:%.*]], i64 1
-; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, i32* [[TMP1]], i64 [[INDEX:%.*]]
-; CHECK-NEXT: ret i32* [[EE]]
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x ptr> [[BASE:%.*]], i64 1
+; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, ptr [[TMP1]], i64 [[INDEX:%.*]]
+; CHECK-NEXT: ret ptr [[EE]]
;
- %gep = getelementptr i32, <2 x i32*> %base, i64 %index
- %ee = extractelement <2 x i32*> %gep, i32 1
- ret i32* %ee
+ %gep = getelementptr i32, <2 x ptr> %base, i64 %index
+ %ee = extractelement <2 x ptr> %gep, i32 1
+ ret ptr %ee
}
-define i32* @gep_splat_base_w_s_idx(i32* %base) {
+define ptr @gep_splat_base_w_s_idx(ptr %base) {
; CHECK-LABEL: @gep_splat_base_w_s_idx(
-; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, i32* [[BASE:%.*]], i64 1
-; CHECK-NEXT: ret i32* [[EE]]
+; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, ptr [[BASE:%.*]], i64 1
+; CHECK-NEXT: ret ptr [[EE]]
;
- %basevec1 = insertelement <2 x i32*> poison, i32* %base, i32 0
- %basevec2 = shufflevector <2 x i32*> %basevec1, <2 x i32*> poison, <2 x i32> zeroinitializer
- %gep = getelementptr i32, <2 x i32*> %basevec2, i64 1
- %ee = extractelement <2 x i32*> %gep, i32 1
- ret i32* %ee
+ %basevec1 = insertelement <2 x ptr> poison, ptr %base, i32 0
+ %basevec2 = shufflevector <2 x ptr> %basevec1, <2 x ptr> poison, <2 x i32> zeroinitializer
+ %gep = getelementptr i32, <2 x ptr> %basevec2, i64 1
+ %ee = extractelement <2 x ptr> %gep, i32 1
+ ret ptr %ee
}
-define i32* @gep_splat_base_w_cv_idx(i32* %base) {
+define ptr @gep_splat_base_w_cv_idx(ptr %base) {
; CHECK-LABEL: @gep_splat_base_w_cv_idx(
-; CHECK-NEXT: [[BASEVEC2:%.*]] = insertelement <2 x i32*> poison, i32* [[BASE:%.*]], i64 1
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, <2 x i32*> [[BASEVEC2]], <2 x i64> <i64 poison, i64 1>
-; CHECK-NEXT: [[EE:%.*]] = extractelement <2 x i32*> [[GEP]], i64 1
-; CHECK-NEXT: ret i32* [[EE]]
+; CHECK-NEXT: [[BASEVEC2:%.*]] = insertelement <2 x ptr> poison, ptr [[BASE:%.*]], i64 1
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, <2 x ptr> [[BASEVEC2]], <2 x i64> <i64 poison, i64 1>
+; CHECK-NEXT: [[EE:%.*]] = extractelement <2 x ptr> [[GEP]], i64 1
+; CHECK-NEXT: ret ptr [[EE]]
;
- %basevec1 = insertelement <2 x i32*> poison, i32* %base, i32 0
- %basevec2 = shufflevector <2 x i32*> %basevec1, <2 x i32*> poison, <2 x i32> zeroinitializer
- %gep = getelementptr i32, <2 x i32*> %basevec2, <2 x i64> <i64 0, i64 1>
- %ee = extractelement <2 x i32*> %gep, i32 1
- ret i32* %ee
+ %basevec1 = insertelement <2 x ptr> poison, ptr %base, i32 0
+ %basevec2 = shufflevector <2 x ptr> %basevec1, <2 x ptr> poison, <2 x i32> zeroinitializer
+ %gep = getelementptr i32, <2 x ptr> %basevec2, <2 x i64> <i64 0, i64 1>
+ %ee = extractelement <2 x ptr> %gep, i32 1
+ ret ptr %ee
}
-define i32* @gep_splat_base_w_vidx(i32* %base, <2 x i64> %idxvec) {
+define ptr @gep_splat_base_w_vidx(ptr %base, <2 x i64> %idxvec) {
; CHECK-LABEL: @gep_splat_base_w_vidx(
-; CHECK-NEXT: [[BASEVEC2:%.*]] = insertelement <2 x i32*> poison, i32* [[BASE:%.*]], i64 1
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, <2 x i32*> [[BASEVEC2]], <2 x i64> [[IDXVEC:%.*]]
-; CHECK-NEXT: [[EE:%.*]] = extractelement <2 x i32*> [[GEP]], i64 1
-; CHECK-NEXT: ret i32* [[EE]]
+; CHECK-NEXT: [[BASEVEC2:%.*]] = insertelement <2 x ptr> poison, ptr [[BASE:%.*]], i64 1
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, <2 x ptr> [[BASEVEC2]], <2 x i64> [[IDXVEC:%.*]]
+; CHECK-NEXT: [[EE:%.*]] = extractelement <2 x ptr> [[GEP]], i64 1
+; CHECK-NEXT: ret ptr [[EE]]
;
- %basevec1 = insertelement <2 x i32*> poison, i32* %base, i32 0
- %basevec2 = shufflevector <2 x i32*> %basevec1, <2 x i32*> poison, <2 x i32> zeroinitializer
- %gep = getelementptr i32, <2 x i32*> %basevec2, <2 x i64> %idxvec
- %ee = extractelement <2 x i32*> %gep, i32 1
- ret i32* %ee
+ %basevec1 = insertelement <2 x ptr> poison, ptr %base, i32 0
+ %basevec2 = shufflevector <2 x ptr> %basevec1, <2 x ptr> poison, <2 x i32> zeroinitializer
+ %gep = getelementptr i32, <2 x ptr> %basevec2, <2 x i64> %idxvec
+ %ee = extractelement <2 x ptr> %gep, i32 1
+ ret ptr %ee
}
@GLOBAL = internal global i32 zeroinitializer
-define i32* @gep_cvbase_w_s_idx(<2 x i32*> %base, i64 %raw_addr) {
+define ptr @gep_cvbase_w_s_idx(<2 x ptr> %base, i64 %raw_addr) {
; CHECK-LABEL: @gep_cvbase_w_s_idx(
-; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, i32* @GLOBAL, i64 [[RAW_ADDR:%.*]]
-; CHECK-NEXT: ret i32* [[EE]]
+; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, ptr @GLOBAL, i64 [[RAW_ADDR:%.*]]
+; CHECK-NEXT: ret ptr [[EE]]
;
- %gep = getelementptr i32, <2 x i32*> <i32* @GLOBAL, i32* @GLOBAL>, i64 %raw_addr
- %ee = extractelement <2 x i32*> %gep, i32 1
- ret i32* %ee
+ %gep = getelementptr i32, <2 x ptr> <ptr @GLOBAL, ptr @GLOBAL>, i64 %raw_addr
+ %ee = extractelement <2 x ptr> %gep, i32 1
+ ret ptr %ee
}
-define i32* @gep_cvbase_w_cv_idx(<2 x i32*> %base, i64 %raw_addr) {
+define ptr @gep_cvbase_w_cv_idx(<2 x ptr> %base, i64 %raw_addr) {
; CHECK-LABEL: @gep_cvbase_w_cv_idx(
-; CHECK-NEXT: ret i32* getelementptr inbounds (i32, i32* @GLOBAL, i64 1)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i32, ptr @GLOBAL, i64 1)
;
- %gep = getelementptr i32, <2 x i32*> <i32* @GLOBAL, i32* @GLOBAL>, <2 x i64> <i64 0, i64 1>
- %ee = extractelement <2 x i32*> %gep, i32 1
- ret i32* %ee
+ %gep = getelementptr i32, <2 x ptr> <ptr @GLOBAL, ptr @GLOBAL>, <2 x i64> <i64 0, i64 1>
+ %ee = extractelement <2 x ptr> %gep, i32 1
+ ret ptr %ee
}
-define i32* @gep_sbase_w_cv_idx(i32* %base) {
+define ptr @gep_sbase_w_cv_idx(ptr %base) {
; CHECK-LABEL: @gep_sbase_w_cv_idx(
-; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, i32* [[BASE:%.*]], i64 1
-; CHECK-NEXT: ret i32* [[EE]]
+; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, ptr [[BASE:%.*]], i64 1
+; CHECK-NEXT: ret ptr [[EE]]
;
- %gep = getelementptr i32, i32* %base, <2 x i64> <i64 0, i64 1>
- %ee = extractelement <2 x i32*> %gep, i32 1
- ret i32* %ee
+ %gep = getelementptr i32, ptr %base, <2 x i64> <i64 0, i64 1>
+ %ee = extractelement <2 x ptr> %gep, i32 1
+ ret ptr %ee
}
-define i32* @gep_sbase_w_splat_idx(i32* %base, i64 %idx) {
+define ptr @gep_sbase_w_splat_idx(ptr %base, i64 %idx) {
; CHECK-LABEL: @gep_sbase_w_splat_idx(
-; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, i32* [[BASE:%.*]], i64 [[IDX:%.*]]
-; CHECK-NEXT: ret i32* [[EE]]
+; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, ptr [[BASE:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: ret ptr [[EE]]
;
%idxvec1 = insertelement <2 x i64> poison, i64 %idx, i32 0
%idxvec2 = shufflevector <2 x i64> %idxvec1, <2 x i64> poison, <2 x i32> zeroinitializer
- %gep = getelementptr i32, i32* %base, <2 x i64> %idxvec2
- %ee = extractelement <2 x i32*> %gep, i32 1
- ret i32* %ee
+ %gep = getelementptr i32, ptr %base, <2 x i64> %idxvec2
+ %ee = extractelement <2 x ptr> %gep, i32 1
+ ret ptr %ee
}
-define i32* @gep_splat_both(i32* %base, i64 %idx) {
+define ptr @gep_splat_both(ptr %base, i64 %idx) {
; CHECK-LABEL: @gep_splat_both(
-; CHECK-NEXT: [[BASEVEC2:%.*]] = insertelement <2 x i32*> poison, i32* [[BASE:%.*]], i64 1
+; CHECK-NEXT: [[BASEVEC2:%.*]] = insertelement <2 x ptr> poison, ptr [[BASE:%.*]], i64 1
; CHECK-NEXT: [[IDXVEC2:%.*]] = insertelement <2 x i64> poison, i64 [[IDX:%.*]], i64 1
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, <2 x i32*> [[BASEVEC2]], <2 x i64> [[IDXVEC2]]
-; CHECK-NEXT: [[EE:%.*]] = extractelement <2 x i32*> [[GEP]], i64 1
-; CHECK-NEXT: ret i32* [[EE]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, <2 x ptr> [[BASEVEC2]], <2 x i64> [[IDXVEC2]]
+; CHECK-NEXT: [[EE:%.*]] = extractelement <2 x ptr> [[GEP]], i64 1
+; CHECK-NEXT: ret ptr [[EE]]
;
- %basevec1 = insertelement <2 x i32*> poison, i32* %base, i32 0
- %basevec2 = shufflevector <2 x i32*> %basevec1, <2 x i32*> poison, <2 x i32> zeroinitializer
+ %basevec1 = insertelement <2 x ptr> poison, ptr %base, i32 0
+ %basevec2 = shufflevector <2 x ptr> %basevec1, <2 x ptr> poison, <2 x i32> zeroinitializer
%idxvec1 = insertelement <2 x i64> poison, i64 %idx, i32 0
%idxvec2 = shufflevector <2 x i64> %idxvec1, <2 x i64> poison, <2 x i32> zeroinitializer
- %gep = getelementptr i32, <2 x i32*> %basevec2, <2 x i64> %idxvec2
- %ee = extractelement <2 x i32*> %gep, i32 1
- ret i32* %ee
+ %gep = getelementptr i32, <2 x ptr> %basevec2, <2 x i64> %idxvec2
+ %ee = extractelement <2 x ptr> %gep, i32 1
+ ret ptr %ee
}
-define <2 x i32*> @gep_all_lanes_undef(i32* %base, i64 %idx) {;
+define <2 x ptr> @gep_all_lanes_undef(ptr %base, i64 %idx) {
; CHECK-LABEL: @gep_all_lanes_undef(
-; CHECK-NEXT: [[BASEVEC:%.*]] = insertelement <2 x i32*> poison, i32* [[BASE:%.*]], i64 0
+; CHECK-NEXT: [[BASEVEC:%.*]] = insertelement <2 x ptr> poison, ptr [[BASE:%.*]], i64 0
; CHECK-NEXT: [[IDXVEC:%.*]] = insertelement <2 x i64> poison, i64 [[IDX:%.*]], i64 1
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, <2 x i32*> [[BASEVEC]], <2 x i64> [[IDXVEC]]
-; CHECK-NEXT: ret <2 x i32*> [[GEP]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, <2 x ptr> [[BASEVEC]], <2 x i64> [[IDXVEC]]
+; CHECK-NEXT: ret <2 x ptr> [[GEP]]
;
- %basevec = insertelement <2 x i32*> poison, i32* %base, i32 0
+ %basevec = insertelement <2 x ptr> poison, ptr %base, i32 0
%idxvec = insertelement <2 x i64> poison, i64 %idx, i32 1
- %gep = getelementptr i32, <2 x i32*> %basevec, <2 x i64> %idxvec
- ret <2 x i32*> %gep
+ %gep = getelementptr i32, <2 x ptr> %basevec, <2 x i64> %idxvec
+ ret <2 x ptr> %gep
}
-define i32* @gep_demanded_lane_undef(i32* %base, i64 %idx) {
+define ptr @gep_demanded_lane_undef(ptr %base, i64 %idx) {
; CHECK-LABEL: @gep_demanded_lane_undef(
-; CHECK-NEXT: ret i32* undef
+; CHECK-NEXT: ret ptr undef
;
- %basevec = insertelement <2 x i32*> poison, i32* %base, i32 0
+ %basevec = insertelement <2 x ptr> poison, ptr %base, i32 0
%idxvec = insertelement <2 x i64> poison, i64 %idx, i32 1
- %gep = getelementptr i32, <2 x i32*> %basevec, <2 x i64> %idxvec
- %ee = extractelement <2 x i32*> %gep, i32 1
- ret i32* %ee
+ %gep = getelementptr i32, <2 x ptr> %basevec, <2 x i64> %idxvec
+ %ee = extractelement <2 x ptr> %gep, i32 1
+ ret ptr %ee
}
;; LangRef has an odd quirk around FCAs which makes it illegal to use undef
;; indices.
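; (Concretely: struct-typed GEP indices may not be undef/poison, so
; demanded-lane simplification has to leave the vector GEP's operands
; below intact.)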
-define i32* @PR41624(<2 x { i32, i32 }*> %a) {
+define ptr @PR41624(<2 x ptr> %a) {
; CHECK-LABEL: @PR41624(
-; CHECK-NEXT: [[W:%.*]] = getelementptr { i32, i32 }, <2 x { i32, i32 }*> [[A:%.*]], <2 x i64> <i64 5, i64 5>, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[R:%.*]] = extractelement <2 x i32*> [[W]], i64 0
-; CHECK-NEXT: ret i32* [[R]]
+; CHECK-NEXT: [[W:%.*]] = getelementptr { i32, i32 }, <2 x ptr> [[A:%.*]], <2 x i64> <i64 5, i64 5>, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[R:%.*]] = extractelement <2 x ptr> [[W]], i64 0
+; CHECK-NEXT: ret ptr [[R]]
;
- %w = getelementptr { i32, i32 }, <2 x { i32, i32 }*> %a, <2 x i64> <i64 5, i64 5>, <2 x i32> zeroinitializer
- %r = extractelement <2 x i32*> %w, i32 0
- ret i32* %r
+ %w = getelementptr { i32, i32 }, <2 x ptr> %a, <2 x i64> <i64 5, i64 5>, <2 x i32> zeroinitializer
+ %r = extractelement <2 x ptr> %w, i32 0
+ ret ptr %r
}
@global = external global [0 x i32], align 4
; Make sure we don't get stuck in a loop turning the zeroinitializer into
; <0, undef, undef, undef> and then changing it back.
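; (Demanded-lane analysis would rewrite the zeroinitializer index to
; <i64 0, i64 poison, i64 poison, i64 poison>, while re-canonicalization
; would turn it back; the test guards against that ping-pong.)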
-define i32* @zero_sized_type_extract(<4 x i64> %arg, i64 %arg1) {
+define ptr @zero_sized_type_extract(<4 x i64> %arg, i64 %arg1) {
; CHECK-LABEL: @zero_sized_type_extract(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[T:%.*]] = getelementptr inbounds [0 x i32], <4 x [0 x i32]*> <[0 x i32]* @global, [0 x i32]* poison, [0 x i32]* poison, [0 x i32]* poison>, <4 x i64> <i64 0, i64 poison, i64 poison, i64 poison>, <4 x i64> [[ARG:%.*]]
-; CHECK-NEXT: [[T2:%.*]] = extractelement <4 x i32*> [[T]], i64 0
-; CHECK-NEXT: ret i32* [[T2]]
+; CHECK-NEXT: [[T:%.*]] = getelementptr inbounds [0 x i32], <4 x ptr> <ptr @global, ptr poison, ptr poison, ptr poison>, <4 x i64> <i64 0, i64 poison, i64 poison, i64 poison>, <4 x i64> [[ARG:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = extractelement <4 x ptr> [[T]], i64 0
+; CHECK-NEXT: ret ptr [[T2]]
;
bb:
- %t = getelementptr inbounds [0 x i32], <4 x [0 x i32]*> <[0 x i32]* @global, [0 x i32]* @global, [0 x i32]* @global, [0 x i32]* @global>, <4 x i64> zeroinitializer, <4 x i64> %arg
- %t2 = extractelement <4 x i32*> %t, i64 0
- ret i32* %t2
+ %t = getelementptr inbounds [0 x i32], <4 x ptr> <ptr @global, ptr @global, ptr @global, ptr @global>, <4 x i64> zeroinitializer, <4 x i64> %arg
+ %t2 = extractelement <4 x ptr> %t, i64 0
+ ret ptr %t2
}
; The non-zero elements of the result are always 'y', so the splat is unnecessary.
define void @get_image() nounwind {
; CHECK-LABEL: @get_image(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i32 @fgetc(i8* null) #[[ATTR0:[0-9]+]]
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @fgetc(ptr null) #[[ATTR0:[0-9]+]]
; CHECK-NEXT: br i1 false, label [[BB2:%.*]], label [[BB3:%.*]]
; CHECK: bb2:
; CHECK-NEXT: br label [[BB3]]
; CHECK-NEXT: unreachable
;
entry:
- %0 = call i32 @fgetc(i8* null) nounwind
+ %0 = call i32 @fgetc(ptr null) nounwind
%1 = trunc i32 %0 to i8
%t2 = insertelement <100 x i8> zeroinitializer, i8 %1, i32 1
%t1 = extractelement <100 x i8> %t2, i32 0
}
; PR4340
-define void @vac(<4 x float>* nocapture %a) nounwind {
+define void @vac(ptr nocapture %a) nounwind {
; CHECK-LABEL: @vac(
; CHECK-NEXT: entry:
-; CHECK-NEXT: store <4 x float> zeroinitializer, <4 x float>* [[A:%.*]], align 16
+; CHECK-NEXT: store <4 x float> zeroinitializer, ptr [[A:%.*]], align 16
; CHECK-NEXT: ret void
;
entry:
- %t1 = load <4 x float>, <4 x float>* %a ; <<4 x float>> [#uses=1]
+ %t1 = load <4 x float>, ptr %a ; <<4 x float>> [#uses=1]
%vecins = insertelement <4 x float> %t1, float 0.000000e+00, i32 0 ; <<4 x float>> [#uses=1]
%vecins4 = insertelement <4 x float> %vecins, float 0.000000e+00, i32 1; <<4 x float>> [#uses=1]
%vecins6 = insertelement <4 x float> %vecins4, float 0.000000e+00, i32 2; <<4 x float>> [#uses=1]
%vecins8 = insertelement <4 x float> %vecins6, float 0.000000e+00, i32 3; <<4 x float>> [#uses=1]
- store <4 x float> %vecins8, <4 x float>* %a
+ store <4 x float> %vecins8, ptr %a
ret void
}
-declare i32 @fgetc(i8*)
+declare i32 @fgetc(ptr)
define <4 x float> @dead_shuffle_elt(<4 x float> %x, <2 x float> %y) nounwind {
; CHECK-LABEL: @dead_shuffle_elt(
ret <3 x float> %r
}
-define i32* @gep_vbase_w_s_idx(<2 x i32*> %base, i64 %index) {
+define ptr @gep_vbase_w_s_idx(<2 x ptr> %base, i64 %index) {
; CHECK-LABEL: @gep_vbase_w_s_idx(
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x i32*> [[BASE:%.*]], i64 1
-; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, i32* [[TMP1]], i64 [[INDEX:%.*]]
-; CHECK-NEXT: ret i32* [[EE]]
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x ptr> [[BASE:%.*]], i64 1
+; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, ptr [[TMP1]], i64 [[INDEX:%.*]]
+; CHECK-NEXT: ret ptr [[EE]]
;
- %gep = getelementptr i32, <2 x i32*> %base, i64 %index
- %ee = extractelement <2 x i32*> %gep, i32 1
- ret i32* %ee
+ %gep = getelementptr i32, <2 x ptr> %base, i64 %index
+ %ee = extractelement <2 x ptr> %gep, i32 1
+ ret ptr %ee
}
-define i32* @gep_splat_base_w_s_idx(i32* %base) {
+define ptr @gep_splat_base_w_s_idx(ptr %base) {
; CHECK-LABEL: @gep_splat_base_w_s_idx(
-; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, i32* [[BASE:%.*]], i64 1
-; CHECK-NEXT: ret i32* [[EE]]
+; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, ptr [[BASE:%.*]], i64 1
+; CHECK-NEXT: ret ptr [[EE]]
;
- %basevec1 = insertelement <2 x i32*> undef, i32* %base, i32 0
- %basevec2 = shufflevector <2 x i32*> %basevec1, <2 x i32*> undef, <2 x i32> zeroinitializer
- %gep = getelementptr i32, <2 x i32*> %basevec2, i64 1
- %ee = extractelement <2 x i32*> %gep, i32 1
- ret i32* %ee
+ %basevec1 = insertelement <2 x ptr> undef, ptr %base, i32 0
+ %basevec2 = shufflevector <2 x ptr> %basevec1, <2 x ptr> undef, <2 x i32> zeroinitializer
+ %gep = getelementptr i32, <2 x ptr> %basevec2, i64 1
+ %ee = extractelement <2 x ptr> %gep, i32 1
+ ret ptr %ee
}
-define i32* @gep_splat_base_w_cv_idx(i32* %base) {
+define ptr @gep_splat_base_w_cv_idx(ptr %base) {
; CHECK-LABEL: @gep_splat_base_w_cv_idx(
-; CHECK-NEXT: [[BASEVEC2:%.*]] = insertelement <2 x i32*> undef, i32* [[BASE:%.*]], i64 1
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, <2 x i32*> [[BASEVEC2]], <2 x i64> <i64 poison, i64 1>
-; CHECK-NEXT: [[EE:%.*]] = extractelement <2 x i32*> [[GEP]], i64 1
-; CHECK-NEXT: ret i32* [[EE]]
+; CHECK-NEXT: [[BASEVEC2:%.*]] = insertelement <2 x ptr> undef, ptr [[BASE:%.*]], i64 1
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, <2 x ptr> [[BASEVEC2]], <2 x i64> <i64 poison, i64 1>
+; CHECK-NEXT: [[EE:%.*]] = extractelement <2 x ptr> [[GEP]], i64 1
+; CHECK-NEXT: ret ptr [[EE]]
;
- %basevec1 = insertelement <2 x i32*> undef, i32* %base, i32 0
- %basevec2 = shufflevector <2 x i32*> %basevec1, <2 x i32*> undef, <2 x i32> zeroinitializer
- %gep = getelementptr i32, <2 x i32*> %basevec2, <2 x i64> <i64 0, i64 1>
- %ee = extractelement <2 x i32*> %gep, i32 1
- ret i32* %ee
+ %basevec1 = insertelement <2 x ptr> undef, ptr %base, i32 0
+ %basevec2 = shufflevector <2 x ptr> %basevec1, <2 x ptr> undef, <2 x i32> zeroinitializer
+ %gep = getelementptr i32, <2 x ptr> %basevec2, <2 x i64> <i64 0, i64 1>
+ %ee = extractelement <2 x ptr> %gep, i32 1
+ ret ptr %ee
}
-define i32* @gep_splat_base_w_vidx(i32* %base, <2 x i64> %idxvec) {
+define ptr @gep_splat_base_w_vidx(ptr %base, <2 x i64> %idxvec) {
; CHECK-LABEL: @gep_splat_base_w_vidx(
-; CHECK-NEXT: [[BASEVEC2:%.*]] = insertelement <2 x i32*> undef, i32* [[BASE:%.*]], i64 1
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, <2 x i32*> [[BASEVEC2]], <2 x i64> [[IDXVEC:%.*]]
-; CHECK-NEXT: [[EE:%.*]] = extractelement <2 x i32*> [[GEP]], i64 1
-; CHECK-NEXT: ret i32* [[EE]]
+; CHECK-NEXT: [[BASEVEC2:%.*]] = insertelement <2 x ptr> undef, ptr [[BASE:%.*]], i64 1
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, <2 x ptr> [[BASEVEC2]], <2 x i64> [[IDXVEC:%.*]]
+; CHECK-NEXT: [[EE:%.*]] = extractelement <2 x ptr> [[GEP]], i64 1
+; CHECK-NEXT: ret ptr [[EE]]
;
- %basevec1 = insertelement <2 x i32*> undef, i32* %base, i32 0
- %basevec2 = shufflevector <2 x i32*> %basevec1, <2 x i32*> undef, <2 x i32> zeroinitializer
- %gep = getelementptr i32, <2 x i32*> %basevec2, <2 x i64> %idxvec
- %ee = extractelement <2 x i32*> %gep, i32 1
- ret i32* %ee
+ %basevec1 = insertelement <2 x ptr> undef, ptr %base, i32 0
+ %basevec2 = shufflevector <2 x ptr> %basevec1, <2 x ptr> undef, <2 x i32> zeroinitializer
+ %gep = getelementptr i32, <2 x ptr> %basevec2, <2 x i64> %idxvec
+ %ee = extractelement <2 x ptr> %gep, i32 1
+ ret ptr %ee
}
@GLOBAL = internal global i32 zeroinitializer
-define i32* @gep_cvbase_w_s_idx(<2 x i32*> %base, i64 %raw_addr) {
+define ptr @gep_cvbase_w_s_idx(<2 x ptr> %base, i64 %raw_addr) {
; CHECK-LABEL: @gep_cvbase_w_s_idx(
-; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, i32* @GLOBAL, i64 [[RAW_ADDR:%.*]]
-; CHECK-NEXT: ret i32* [[EE]]
+; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, ptr @GLOBAL, i64 [[RAW_ADDR:%.*]]
+; CHECK-NEXT: ret ptr [[EE]]
;
- %gep = getelementptr i32, <2 x i32*> <i32* @GLOBAL, i32* @GLOBAL>, i64 %raw_addr
- %ee = extractelement <2 x i32*> %gep, i32 1
- ret i32* %ee
+ %gep = getelementptr i32, <2 x ptr> <ptr @GLOBAL, ptr @GLOBAL>, i64 %raw_addr
+ %ee = extractelement <2 x ptr> %gep, i32 1
+ ret ptr %ee
}
-define i32* @gep_cvbase_w_cv_idx(<2 x i32*> %base, i64 %raw_addr) {
+define ptr @gep_cvbase_w_cv_idx(<2 x ptr> %base, i64 %raw_addr) {
; CHECK-LABEL: @gep_cvbase_w_cv_idx(
-; CHECK-NEXT: ret i32* getelementptr inbounds (i32, i32* @GLOBAL, i64 1)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i32, ptr @GLOBAL, i64 1)
;
- %gep = getelementptr i32, <2 x i32*> <i32* @GLOBAL, i32* @GLOBAL>, <2 x i64> <i64 0, i64 1>
- %ee = extractelement <2 x i32*> %gep, i32 1
- ret i32* %ee
+ %gep = getelementptr i32, <2 x ptr> <ptr @GLOBAL, ptr @GLOBAL>, <2 x i64> <i64 0, i64 1>
+ %ee = extractelement <2 x ptr> %gep, i32 1
+ ret ptr %ee
}
-define i32* @gep_sbase_w_cv_idx(i32* %base) {
+define ptr @gep_sbase_w_cv_idx(ptr %base) {
; CHECK-LABEL: @gep_sbase_w_cv_idx(
-; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, i32* [[BASE:%.*]], i64 1
-; CHECK-NEXT: ret i32* [[EE]]
+; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, ptr [[BASE:%.*]], i64 1
+; CHECK-NEXT: ret ptr [[EE]]
;
- %gep = getelementptr i32, i32* %base, <2 x i64> <i64 0, i64 1>
- %ee = extractelement <2 x i32*> %gep, i32 1
- ret i32* %ee
+ %gep = getelementptr i32, ptr %base, <2 x i64> <i64 0, i64 1>
+ %ee = extractelement <2 x ptr> %gep, i32 1
+ ret ptr %ee
}
-define i32* @gep_sbase_w_splat_idx(i32* %base, i64 %idx) {
+define ptr @gep_sbase_w_splat_idx(ptr %base, i64 %idx) {
; CHECK-LABEL: @gep_sbase_w_splat_idx(
-; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, i32* [[BASE:%.*]], i64 [[IDX:%.*]]
-; CHECK-NEXT: ret i32* [[EE]]
+; CHECK-NEXT: [[EE:%.*]] = getelementptr i32, ptr [[BASE:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: ret ptr [[EE]]
;
%idxvec1 = insertelement <2 x i64> undef, i64 %idx, i32 0
%idxvec2 = shufflevector <2 x i64> %idxvec1, <2 x i64> undef, <2 x i32> zeroinitializer
- %gep = getelementptr i32, i32* %base, <2 x i64> %idxvec2
- %ee = extractelement <2 x i32*> %gep, i32 1
- ret i32* %ee
+ %gep = getelementptr i32, ptr %base, <2 x i64> %idxvec2
+ %ee = extractelement <2 x ptr> %gep, i32 1
+ ret ptr %ee
}
-define i32* @gep_splat_both(i32* %base, i64 %idx) {
+define ptr @gep_splat_both(ptr %base, i64 %idx) {
; CHECK-LABEL: @gep_splat_both(
-; CHECK-NEXT: [[BASEVEC2:%.*]] = insertelement <2 x i32*> undef, i32* [[BASE:%.*]], i64 1
+; CHECK-NEXT: [[BASEVEC2:%.*]] = insertelement <2 x ptr> undef, ptr [[BASE:%.*]], i64 1
; CHECK-NEXT: [[IDXVEC2:%.*]] = insertelement <2 x i64> undef, i64 [[IDX:%.*]], i64 1
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, <2 x i32*> [[BASEVEC2]], <2 x i64> [[IDXVEC2]]
-; CHECK-NEXT: [[EE:%.*]] = extractelement <2 x i32*> [[GEP]], i64 1
-; CHECK-NEXT: ret i32* [[EE]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, <2 x ptr> [[BASEVEC2]], <2 x i64> [[IDXVEC2]]
+; CHECK-NEXT: [[EE:%.*]] = extractelement <2 x ptr> [[GEP]], i64 1
+; CHECK-NEXT: ret ptr [[EE]]
;
- %basevec1 = insertelement <2 x i32*> undef, i32* %base, i32 0
- %basevec2 = shufflevector <2 x i32*> %basevec1, <2 x i32*> undef, <2 x i32> zeroinitializer
+ %basevec1 = insertelement <2 x ptr> undef, ptr %base, i32 0
+ %basevec2 = shufflevector <2 x ptr> %basevec1, <2 x ptr> undef, <2 x i32> zeroinitializer
%idxvec1 = insertelement <2 x i64> undef, i64 %idx, i32 0
%idxvec2 = shufflevector <2 x i64> %idxvec1, <2 x i64> undef, <2 x i32> zeroinitializer
- %gep = getelementptr i32, <2 x i32*> %basevec2, <2 x i64> %idxvec2
- %ee = extractelement <2 x i32*> %gep, i32 1
- ret i32* %ee
+ %gep = getelementptr i32, <2 x ptr> %basevec2, <2 x i64> %idxvec2
+ %ee = extractelement <2 x ptr> %gep, i32 1
+ ret ptr %ee
}
-define <2 x i32*> @gep_all_lanes_undef(i32* %base, i64 %idx) {;
+define <2 x ptr> @gep_all_lanes_undef(ptr %base, i64 %idx) {
; CHECK-LABEL: @gep_all_lanes_undef(
-; CHECK-NEXT: [[BASEVEC:%.*]] = insertelement <2 x i32*> undef, i32* [[BASE:%.*]], i64 0
+; CHECK-NEXT: [[BASEVEC:%.*]] = insertelement <2 x ptr> undef, ptr [[BASE:%.*]], i64 0
; CHECK-NEXT: [[IDXVEC:%.*]] = insertelement <2 x i64> undef, i64 [[IDX:%.*]], i64 1
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, <2 x i32*> [[BASEVEC]], <2 x i64> [[IDXVEC]]
-; CHECK-NEXT: ret <2 x i32*> [[GEP]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, <2 x ptr> [[BASEVEC]], <2 x i64> [[IDXVEC]]
+; CHECK-NEXT: ret <2 x ptr> [[GEP]]
;
- %basevec = insertelement <2 x i32*> undef, i32* %base, i32 0
+ %basevec = insertelement <2 x ptr> undef, ptr %base, i32 0
%idxvec = insertelement <2 x i64> undef, i64 %idx, i32 1
- %gep = getelementptr i32, <2 x i32*> %basevec, <2 x i64> %idxvec
- ret <2 x i32*> %gep
+ %gep = getelementptr i32, <2 x ptr> %basevec, <2 x i64> %idxvec
+ ret <2 x ptr> %gep
}
-define i32* @gep_demanded_lane_undef(i32* %base, i64 %idx) {
+define ptr @gep_demanded_lane_undef(ptr %base, i64 %idx) {
; CHECK-LABEL: @gep_demanded_lane_undef(
-; CHECK-NEXT: ret i32* undef
+; CHECK-NEXT: ret ptr undef
;
- %basevec = insertelement <2 x i32*> undef, i32* %base, i32 0
+ %basevec = insertelement <2 x ptr> undef, ptr %base, i32 0
%idxvec = insertelement <2 x i64> undef, i64 %idx, i32 1
- %gep = getelementptr i32, <2 x i32*> %basevec, <2 x i64> %idxvec
- %ee = extractelement <2 x i32*> %gep, i32 1
- ret i32* %ee
+ %gep = getelementptr i32, <2 x ptr> %basevec, <2 x i64> %idxvec
+ %ee = extractelement <2 x ptr> %gep, i32 1
+ ret ptr %ee
}
;; LangRef has an odd quirk around FCAs which makes it illegal to use undef
;; indices.
-define i32* @PR41624(<2 x { i32, i32 }*> %a) {
+define ptr @PR41624(<2 x ptr> %a) {
; CHECK-LABEL: @PR41624(
-; CHECK-NEXT: [[W:%.*]] = getelementptr { i32, i32 }, <2 x { i32, i32 }*> [[A:%.*]], <2 x i64> <i64 5, i64 5>, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[R:%.*]] = extractelement <2 x i32*> [[W]], i64 0
-; CHECK-NEXT: ret i32* [[R]]
+; CHECK-NEXT: [[W:%.*]] = getelementptr { i32, i32 }, <2 x ptr> [[A:%.*]], <2 x i64> <i64 5, i64 5>, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[R:%.*]] = extractelement <2 x ptr> [[W]], i64 0
+; CHECK-NEXT: ret ptr [[R]]
;
- %w = getelementptr { i32, i32 }, <2 x { i32, i32 }*> %a, <2 x i64> <i64 5, i64 5>, <2 x i32> zeroinitializer
- %r = extractelement <2 x i32*> %w, i32 0
- ret i32* %r
+ %w = getelementptr { i32, i32 }, <2 x ptr> %a, <2 x i64> <i64 5, i64 5>, <2 x i32> zeroinitializer
+ %r = extractelement <2 x ptr> %w, i32 0
+ ret ptr %r
}
@global = external global [0 x i32], align 4
; Make sure we don't get stuck in a loop turning the zeroinitializer into
; <0, undef, undef, undef> and then changing it back.
-define i32* @zero_sized_type_extract(<4 x i64> %arg, i64 %arg1) {
+define ptr @zero_sized_type_extract(<4 x i64> %arg, i64 %arg1) {
; CHECK-LABEL: @zero_sized_type_extract(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[T:%.*]] = getelementptr inbounds [0 x i32], <4 x [0 x i32]*> <[0 x i32]* @global, [0 x i32]* poison, [0 x i32]* poison, [0 x i32]* poison>, <4 x i64> <i64 0, i64 poison, i64 poison, i64 poison>, <4 x i64> [[ARG:%.*]]
-; CHECK-NEXT: [[T2:%.*]] = extractelement <4 x i32*> [[T]], i64 0
-; CHECK-NEXT: ret i32* [[T2]]
+; CHECK-NEXT: [[T:%.*]] = getelementptr inbounds [0 x i32], <4 x ptr> <ptr @global, ptr poison, ptr poison, ptr poison>, <4 x i64> <i64 0, i64 poison, i64 poison, i64 poison>, <4 x i64> [[ARG:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = extractelement <4 x ptr> [[T]], i64 0
+; CHECK-NEXT: ret ptr [[T2]]
;
bb:
- %t = getelementptr inbounds [0 x i32], <4 x [0 x i32]*> <[0 x i32]* @global, [0 x i32]* @global, [0 x i32]* @global, [0 x i32]* @global>, <4 x i64> zeroinitializer, <4 x i64> %arg
- %t2 = extractelement <4 x i32*> %t, i64 0
- ret i32* %t2
+ %t = getelementptr inbounds [0 x i32], <4 x ptr> <ptr @global, ptr @global, ptr @global, ptr @global>, <4 x i64> zeroinitializer, <4 x i64> %arg
+ %t2 = extractelement <4 x ptr> %t, i64 0
+ ret ptr %t2
}
; The non-zero elements of the result are always 'y', so the splat is unnecessary.
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-define void @test(<4 x i32> %v, i64 *%r1, i64 *%r2) {
+define void @test(<4 x i32> %v, ptr %r1, ptr %r2) {
; CHECK-LABEL: @test(
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i32> [[V:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
-; CHECK-NEXT: store i64 [[TMP2]], i64* [[R1:%.*]], align 4
-; CHECK-NEXT: store i64 [[TMP2]], i64* [[R2:%.*]], align 4
+; CHECK-NEXT: store i64 [[TMP2]], ptr [[R1:%.*]], align 4
+; CHECK-NEXT: store i64 [[TMP2]], ptr [[R2:%.*]], align 4
; CHECK-NEXT: ret void
;
%1 = zext <4 x i32> %v to <4 x i64>
%2 = extractelement <4 x i64> %1, i32 0
- store i64 %2, i64 *%r1
- store i64 %2, i64 *%r2
+ store i64 %2, ptr %r1
+ store i64 %2, ptr %r2
ret void
}
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-define void @test (float %b, <8 x float> * %p) {
+define void @test (float %b, ptr %p) {
; CHECK: extractelement
; CHECK: fptosi
- %1 = load <8 x float> , <8 x float> * %p
+ %1 = load <8 x float> , ptr %p
%2 = bitcast <8 x float> %1 to <8 x i32>
%3 = bitcast <8 x i32> %2 to <8 x float>
%a = fptosi <8 x float> %3 to <8 x i32>
%6 = extractelement <8 x i32> %a, i32 %5
%7 = insertelement <8 x i32> poison, i32 %6, i32 7
%8 = sitofp <8 x i32> %7 to <8 x float>
- store <8 x float> %8, <8 x float>* %p
+ store <8 x float> %8, ptr %p
ret void
}
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-define void @test (float %b, <8 x float> * %p) {
+define void @test (float %b, ptr %p) {
; CHECK: extractelement
; CHECK: fptosi
- %1 = load <8 x float> , <8 x float> * %p
+ %1 = load <8 x float> , ptr %p
%2 = bitcast <8 x float> %1 to <8 x i32>
%3 = bitcast <8 x i32> %2 to <8 x float>
%a = fptosi <8 x float> %3 to <8 x i32>
%6 = extractelement <8 x i32> %a, i32 %5
%7 = insertelement <8 x i32> undef, i32 %6, i32 7
%8 = sitofp <8 x i32> %7 to <8 x float>
- store <8 x float> %8, <8 x float>* %p
+ store <8 x float> %8, ptr %p
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
-define <4 x i16*> @PR41270([4 x i16]* %x) {
+define <4 x ptr> @PR41270(ptr %x) {
; CHECK-LABEL: @PR41270(
-; CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds [4 x i16], [4 x i16]* [[X:%.*]], i64 0, i64 3
-; CHECK-NEXT: [[INS2:%.*]] = insertelement <4 x i16*> poison, i16* [[T3]], i64 0
-; CHECK-NEXT: ret <4 x i16*> [[INS2]]
+; CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds [4 x i16], ptr [[X:%.*]], i64 0, i64 3
+; CHECK-NEXT: [[INS2:%.*]] = insertelement <4 x ptr> poison, ptr [[T3]], i64 0
+; CHECK-NEXT: ret <4 x ptr> [[INS2]]
;
- %ins = insertelement <4 x [4 x i16]*> poison, [4 x i16]* %x, i32 0
- %splat = shufflevector <4 x [4 x i16]*> %ins, <4 x [4 x i16]*> poison, <4 x i32> zeroinitializer
- %t2 = getelementptr inbounds [4 x i16], <4 x [4 x i16]*> %splat, i32 0, i32 3
- %t3 = extractelement <4 x i16*> %t2, i32 3
- %ins2 = insertelement <4 x i16*> poison, i16* %t3, i32 0
- ret <4 x i16*> %ins2
+ %ins = insertelement <4 x ptr> poison, ptr %x, i32 0
+ %splat = shufflevector <4 x ptr> %ins, <4 x ptr> poison, <4 x i32> zeroinitializer
+ %t2 = getelementptr inbounds [4 x i16], <4 x ptr> %splat, i32 0, i32 3
+ %t3 = extractelement <4 x ptr> %t2, i32 3
+ %ins2 = insertelement <4 x ptr> poison, ptr %t3, i32 0
+ ret <4 x ptr> %ins2
}
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
-define <4 x i16*> @PR41270([4 x i16]* %x) {
+define <4 x ptr> @PR41270(ptr %x) {
; CHECK-LABEL: @PR41270(
-; CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds [4 x i16], [4 x i16]* [[X:%.*]], i64 0, i64 3
-; CHECK-NEXT: [[INS2:%.*]] = insertelement <4 x i16*> undef, i16* [[T3]], i64 0
-; CHECK-NEXT: ret <4 x i16*> [[INS2]]
+; CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds [4 x i16], ptr [[X:%.*]], i64 0, i64 3
+; CHECK-NEXT: [[INS2:%.*]] = insertelement <4 x ptr> undef, ptr [[T3]], i64 0
+; CHECK-NEXT: ret <4 x ptr> [[INS2]]
;
- %ins = insertelement <4 x [4 x i16]*> undef, [4 x i16]* %x, i32 0
- %splat = shufflevector <4 x [4 x i16]*> %ins, <4 x [4 x i16]*> undef, <4 x i32> zeroinitializer
- %t2 = getelementptr inbounds [4 x i16], <4 x [4 x i16]*> %splat, i32 0, i32 3
- %t3 = extractelement <4 x i16*> %t2, i32 3
- %ins2 = insertelement <4 x i16*> undef, i16* %t3, i32 0
- ret <4 x i16*> %ins2
+ %ins = insertelement <4 x ptr> undef, ptr %x, i32 0
+ %splat = shufflevector <4 x ptr> %ins, <4 x ptr> undef, <4 x i32> zeroinitializer
+ %t2 = getelementptr inbounds [4 x i16], <4 x ptr> %splat, i32 0, i32 3
+ %t3 = extractelement <4 x ptr> %t2, i32 3
+ %ins2 = insertelement <4 x ptr> undef, ptr %t3, i32 0
+ ret <4 x ptr> %ins2
}
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-define void @f(i64 %val, i32 %limit, i32 *%ptr) {
+define void @f(i64 %val, i32 %limit, ptr %ptr) {
; CHECK-LABEL: @f(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[VAL:%.*]] to i32
; CHECK-NEXT: [[END:%.*]] = icmp ult i32 [[TMP1]], [[LIMIT:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], 10
; CHECK-NEXT: [[TMP3:%.*]] = sext i32 [[TMP1]] to i64
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i64 [[TMP3]]
-; CHECK-NEXT: store i32 [[TMP2]], i32* [[TMP4]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[TMP3]]
+; CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP4]], align 4
; CHECK-NEXT: [[TMP5]] = add i32 [[TMP1]], 16
; CHECK-NEXT: br i1 [[END]], label [[LOOP]], label [[RET:%.*]]
; CHECK: ret:
%end = icmp ult i32 %elt, %limit
%3 = add i32 10, %elt
%4 = sext i32 %elt to i64
- %5 = getelementptr i32, i32* %ptr, i64 %4
- store i32 %3, i32* %5
+ %5 = getelementptr i32, ptr %ptr, i64 %4
+ store i32 %3, ptr %5
%inc = add <16 x i32> %2, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
br i1 %end, label %loop, label %ret
ret void
}
-define void @copy(i64 %val, i32 %limit, i32 *%ptr) {
+define void @copy(i64 %val, i32 %limit, ptr %ptr) {
; CHECK-LABEL: @copy(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[VAL:%.*]] to i32
; CHECK-NEXT: [[END:%.*]] = icmp ult i32 [[TMP1]], [[LIMIT:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], 10
; CHECK-NEXT: [[TMP3:%.*]] = sext i32 [[TMP1]] to i64
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i64 [[TMP3]]
-; CHECK-NEXT: store i32 [[TMP2]], i32* [[TMP4]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[TMP3]]
+; CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP4]], align 4
; CHECK-NEXT: [[TMP5]] = add i32 [[TMP1]], 16
; CHECK-NEXT: br i1 [[END]], label [[LOOP]], label [[RET:%.*]]
; CHECK: ret:
%end = icmp ult i32 %elt, %limit
%3 = add i32 10, %eltcopy
%4 = sext i32 %elt to i64
- %5 = getelementptr i32, i32* %ptr, i64 %4
- store i32 %3, i32* %5
+ %5 = getelementptr i32, ptr %ptr, i64 %4
+ store i32 %3, ptr %5
%inc = add <16 x i32> %2, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
br i1 %end, label %loop, label %ret
ret void
}
-define void @nocopy(i64 %val, i32 %limit, i32 *%ptr) {
+define void @nocopy(i64 %val, i32 %limit, ptr %ptr) {
; CHECK-LABEL: @nocopy(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[VAL:%.*]] to i32
; CHECK-NEXT: [[END:%.*]] = icmp ult i32 [[ELT]], [[LIMIT:%.*]]
; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[ELTCOPY]], 10
; CHECK-NEXT: [[TMP6:%.*]] = sext i32 [[ELT]] to i64
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i64 [[TMP6]]
-; CHECK-NEXT: store i32 [[TMP5]], i32* [[TMP7]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[TMP6]]
+; CHECK-NEXT: store i32 [[TMP5]], ptr [[TMP7]], align 4
; CHECK-NEXT: [[INC]] = add <16 x i32> [[TMP4]], <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
; CHECK-NEXT: br i1 [[END]], label [[LOOP]], label [[RET:%.*]]
; CHECK: ret:
%end = icmp ult i32 %elt, %limit
%3 = add i32 10, %eltcopy
%4 = sext i32 %elt to i64
- %5 = getelementptr i32, i32* %ptr, i64 %4
- store i32 %3, i32* %5
+ %5 = getelementptr i32, ptr %ptr, i64 %4
+ store i32 %3, ptr %5
%inc = add <16 x i32> %2, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
br i1 %end, label %loop, label %ret
define void @test14(i16 %conv10) {
; CHECK-LABEL: @test14(
-; CHECK-NEXT: store <4 x i16> <i16 poison, i16 poison, i16 poison, i16 23>, <4 x i16>* undef, align 8
+; CHECK-NEXT: store <4 x i16> <i16 poison, i16 poison, i16 poison, i16 23>, ptr undef, align 8
; CHECK-NEXT: ret void
;
%t = alloca <4 x i16>, align 8
%vecinit6 = insertelement <4 x i16> poison, i16 23, i32 3
- store <4 x i16> %vecinit6, <4 x i16>* undef
- %t1 = load <4 x i16>, <4 x i16>* undef
+ store <4 x i16> %vecinit6, ptr undef
+ %t1 = load <4 x i16>, ptr undef
%vecinit11 = insertelement <4 x i16> poison, i16 %conv10, i32 3
%div = udiv <4 x i16> %t1, %vecinit11
- store <4 x i16> %div, <4 x i16>* %t
- %t4 = load <4 x i16>, <4 x i16>* %t
+ store <4 x i16> %div, ptr %t
+ %t4 = load <4 x i16>, ptr %t
%t5 = shufflevector <4 x i16> %t4, <4 x i16> poison, <2 x i32> <i32 2, i32 0>
%cmp = icmp ule <2 x i16> %t5, undef
%sext = sext <2 x i1> %cmp to <2 x i16>
define <4 x i32> @pr20114(<4 x i32> %__mask) {
; CHECK-LABEL: @pr20114(
; CHECK-NEXT: [[MASK01_I:%.*]] = shufflevector <4 x i32> [[__MASK:%.*]], <4 x i32> poison, <4 x i32> <i32 0, i32 0, i32 1, i32 1>
-; CHECK-NEXT: [[MASKED_NEW_I_I_I:%.*]] = and <4 x i32> [[MASK01_I]], bitcast (<2 x i64> <i64 ptrtoint (<4 x i32> (<4 x i32>)* @pr20114 to i64), i64 ptrtoint (<4 x i32> (<4 x i32>)* @pr20114 to i64)> to <4 x i32>)
+; CHECK-NEXT: [[MASKED_NEW_I_I_I:%.*]] = and <4 x i32> [[MASK01_I]], bitcast (<2 x i64> <i64 ptrtoint (ptr @pr20114 to i64), i64 ptrtoint (ptr @pr20114 to i64)> to <4 x i32>)
; CHECK-NEXT: ret <4 x i32> [[MASKED_NEW_I_I_I]]
;
%mask01.i = shufflevector <4 x i32> %__mask, <4 x i32> poison, <4 x i32> <i32 0, i32 0, i32 1, i32 1>
- %masked_new.i.i.i = and <4 x i32> bitcast (<2 x i64> <i64 ptrtoint (<4 x i32> (<4 x i32>)* @pr20114 to i64), i64 ptrtoint (<4 x i32> (<4 x i32>)* @pr20114 to i64)> to <4 x i32>), %mask01.i
+ %masked_new.i.i.i = and <4 x i32> bitcast (<2 x i64> <i64 ptrtoint (ptr @pr20114 to i64), i64 ptrtoint (ptr @pr20114 to i64)> to <4 x i32>), %mask01.i
ret <4 x i32> %masked_new.i.i.i
}
-define <2 x i32*> @pr23113(<4 x i32*> %A) {
+define <2 x ptr> @pr23113(<4 x ptr> %A) {
; CHECK-LABEL: @pr23113(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32*> [[A:%.*]], <4 x i32*> poison, <2 x i32> <i32 0, i32 1>
-; CHECK-NEXT: ret <2 x i32*> [[TMP1]]
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x ptr> [[A:%.*]], <4 x ptr> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: ret <2 x ptr> [[TMP1]]
;
- %1 = shufflevector <4 x i32*> %A, <4 x i32*> poison, <2 x i32> <i32 0, i32 1>
- ret <2 x i32*> %1
+ %1 = shufflevector <4 x ptr> %A, <4 x ptr> poison, <2 x i32> <i32 0, i32 1>
+ ret <2 x ptr> %1
}
; Unused lanes in the new binop should not kill the entire op (although it may simplify anyway as shown here).
; Demanded vector elements may not be able to simplify a shuffle mask
; before we try to narrow it. This used to crash.
-define <4 x float> @insert_subvector_crash_invalid_mask_elt(<2 x float> %x, <4 x float>* %p) {
+define <4 x float> @insert_subvector_crash_invalid_mask_elt(<2 x float> %x, ptr %p) {
; CHECK-LABEL: @insert_subvector_crash_invalid_mask_elt(
; CHECK-NEXT: [[WIDEN:%.*]] = shufflevector <2 x float> [[X:%.*]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
; CHECK-NEXT: [[I:%.*]] = shufflevector <2 x float> [[X]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
-; CHECK-NEXT: store <4 x float> [[I]], <4 x float>* [[P:%.*]], align 16
+; CHECK-NEXT: store <4 x float> [[I]], ptr [[P:%.*]], align 16
; CHECK-NEXT: ret <4 x float> [[WIDEN]]
;
%widen = shufflevector <2 x float> %x, <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
%ext2 = extractelement <2 x float> %x, i32 0
%I = insertelement <4 x float> %widen, float %ext2, i16 0
- store <4 x float> %I, <4 x float>* %p
+ store <4 x float> %I, ptr %p
ret <4 x float> %widen
}
define <4 x i32> @PR46872(<4 x i32> %x) {
; CHECK-LABEL: @PR46872(
; CHECK-NEXT: [[S:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 undef, i32 0, i32 1, i32 1>
-; CHECK-NEXT: [[A:%.*]] = and <4 x i32> [[S]], bitcast (<2 x i64> <i64 ptrtoint (<4 x i32> (<4 x i32>)* @PR46872 to i64), i64 ptrtoint (<4 x i32> (<4 x i32>)* @PR46872 to i64)> to <4 x i32>)
+; CHECK-NEXT: [[A:%.*]] = and <4 x i32> [[S]], bitcast (<2 x i64> <i64 ptrtoint (ptr @PR46872 to i64), i64 ptrtoint (ptr @PR46872 to i64)> to <4 x i32>)
; CHECK-NEXT: ret <4 x i32> [[A]]
;
%s = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 undef, i32 0, i32 1, i32 1>
- %a = and <4 x i32> %s, bitcast (<2 x i64> <i64 ptrtoint (<4 x i32> (<4 x i32>)* @PR46872 to i64), i64 ptrtoint (<4 x i32> (<4 x i32>)* @PR46872 to i64)> to <4 x i32>)
+ %a = and <4 x i32> %s, bitcast (<2 x i64> <i64 ptrtoint (ptr @PR46872 to i64), i64 ptrtoint (ptr @PR46872 to i64)> to <4 x i32>)
ret <4 x i32> %a
}
define void @test14(i16 %conv10) {
; CHECK-LABEL: @test14(
-; CHECK-NEXT: store <4 x i16> <i16 undef, i16 undef, i16 undef, i16 23>, <4 x i16>* undef, align 8
+; CHECK-NEXT: store <4 x i16> <i16 undef, i16 undef, i16 undef, i16 23>, ptr undef, align 8
; CHECK-NEXT: ret void
;
%t = alloca <4 x i16>, align 8
%vecinit6 = insertelement <4 x i16> undef, i16 23, i32 3
- store <4 x i16> %vecinit6, <4 x i16>* undef
- %t1 = load <4 x i16>, <4 x i16>* undef
+ store <4 x i16> %vecinit6, ptr undef
+ %t1 = load <4 x i16>, ptr undef
%vecinit11 = insertelement <4 x i16> undef, i16 %conv10, i32 3
%div = udiv <4 x i16> %t1, %vecinit11
- store <4 x i16> %div, <4 x i16>* %t
- %t4 = load <4 x i16>, <4 x i16>* %t
+ store <4 x i16> %div, ptr %t
+ %t4 = load <4 x i16>, ptr %t
%t5 = shufflevector <4 x i16> %t4, <4 x i16> undef, <2 x i32> <i32 2, i32 0>
%cmp = icmp ule <2 x i16> %t5, undef
%sext = sext <2 x i1> %cmp to <2 x i16>
define <4 x i32> @pr20114(<4 x i32> %__mask) {
; CHECK-LABEL: @pr20114(
; CHECK-NEXT: [[MASK01_I:%.*]] = shufflevector <4 x i32> [[__MASK:%.*]], <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 1, i32 1>
-; CHECK-NEXT: [[MASKED_NEW_I_I_I:%.*]] = and <4 x i32> [[MASK01_I]], bitcast (<2 x i64> <i64 ptrtoint (<4 x i32> (<4 x i32>)* @pr20114 to i64), i64 ptrtoint (<4 x i32> (<4 x i32>)* @pr20114 to i64)> to <4 x i32>)
+; CHECK-NEXT: [[MASKED_NEW_I_I_I:%.*]] = and <4 x i32> [[MASK01_I]], bitcast (<2 x i64> <i64 ptrtoint (ptr @pr20114 to i64), i64 ptrtoint (ptr @pr20114 to i64)> to <4 x i32>)
; CHECK-NEXT: ret <4 x i32> [[MASKED_NEW_I_I_I]]
;
%mask01.i = shufflevector <4 x i32> %__mask, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 1, i32 1>
- %masked_new.i.i.i = and <4 x i32> bitcast (<2 x i64> <i64 ptrtoint (<4 x i32> (<4 x i32>)* @pr20114 to i64), i64 ptrtoint (<4 x i32> (<4 x i32>)* @pr20114 to i64)> to <4 x i32>), %mask01.i
+ %masked_new.i.i.i = and <4 x i32> bitcast (<2 x i64> <i64 ptrtoint (ptr @pr20114 to i64), i64 ptrtoint (ptr @pr20114 to i64)> to <4 x i32>), %mask01.i
ret <4 x i32> %masked_new.i.i.i
}
-define <2 x i32*> @pr23113(<4 x i32*> %A) {
+define <2 x ptr> @pr23113(<4 x ptr> %A) {
; CHECK-LABEL: @pr23113(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32*> [[A:%.*]], <4 x i32*> undef, <2 x i32> <i32 0, i32 1>
-; CHECK-NEXT: ret <2 x i32*> [[TMP1]]
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x ptr> [[A:%.*]], <4 x ptr> undef, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: ret <2 x ptr> [[TMP1]]
;
- %1 = shufflevector <4 x i32*> %A, <4 x i32*> undef, <2 x i32> <i32 0, i32 1>
- ret <2 x i32*> %1
+ %1 = shufflevector <4 x ptr> %A, <4 x ptr> undef, <2 x i32> <i32 0, i32 1>
+ ret <2 x ptr> %1
}
; Unused lanes in the new binop should not kill the entire op (although it may simplify anyway as shown here).
; Demanded vector elements may not be able to simplify a shuffle mask
; before we try to narrow it. This used to crash.
-define <4 x float> @insert_subvector_crash_invalid_mask_elt(<2 x float> %x, <4 x float>* %p) {
+define <4 x float> @insert_subvector_crash_invalid_mask_elt(<2 x float> %x, ptr %p) {
; CHECK-LABEL: @insert_subvector_crash_invalid_mask_elt(
; CHECK-NEXT: [[WIDEN:%.*]] = shufflevector <2 x float> [[X:%.*]], <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
; CHECK-NEXT: [[I:%.*]] = shufflevector <2 x float> [[X]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
-; CHECK-NEXT: store <4 x float> [[I]], <4 x float>* [[P:%.*]], align 16
+; CHECK-NEXT: store <4 x float> [[I]], ptr [[P:%.*]], align 16
; CHECK-NEXT: ret <4 x float> [[WIDEN]]
;
%widen = shufflevector <2 x float> %x, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
%ext2 = extractelement <2 x float> %x, i32 0
%I = insertelement <4 x float> %widen, float %ext2, i16 0
- store <4 x float> %I, <4 x float>* %p
+ store <4 x float> %I, ptr %p
ret <4 x float> %widen
}
define <4 x i32> @PR46872(<4 x i32> %x) {
; CHECK-LABEL: @PR46872(
; CHECK-NEXT: [[S:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> undef, <4 x i32> <i32 undef, i32 0, i32 1, i32 1>
-; CHECK-NEXT: [[A:%.*]] = and <4 x i32> [[S]], bitcast (<2 x i64> <i64 ptrtoint (<4 x i32> (<4 x i32>)* @PR46872 to i64), i64 ptrtoint (<4 x i32> (<4 x i32>)* @PR46872 to i64)> to <4 x i32>)
+; CHECK-NEXT: [[A:%.*]] = and <4 x i32> [[S]], bitcast (<2 x i64> <i64 ptrtoint (ptr @PR46872 to i64), i64 ptrtoint (ptr @PR46872 to i64)> to <4 x i32>)
; CHECK-NEXT: ret <4 x i32> [[A]]
;
%s = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 undef, i32 0, i32 1, i32 1>
- %a = and <4 x i32> %s, bitcast (<2 x i64> <i64 ptrtoint (<4 x i32> (<4 x i32>)* @PR46872 to i64), i64 ptrtoint (<4 x i32> (<4 x i32>)* @PR46872 to i64)> to <4 x i32>)
+ %a = and <4 x i32> %s, bitcast (<2 x i64> <i64 ptrtoint (ptr @PR46872 to i64), i64 ptrtoint (ptr @PR46872 to i64)> to <4 x i32>)
ret <4 x i32> %a
}
ret <2 x i64> %conv
}
-define void @convert(<2 x i32>* %dst.addr, <2 x i64> %src) {
+define void @convert(ptr %dst.addr, <2 x i64> %src) {
; CHECK-LABEL: @convert(
; CHECK-NEXT: [[VAL:%.*]] = trunc <2 x i64> [[SRC:%.*]] to <2 x i32>
; CHECK-NEXT: [[ADD:%.*]] = add <2 x i32> [[VAL]], <i32 1, i32 1>
-; CHECK-NEXT: store <2 x i32> [[ADD]], <2 x i32>* [[DST_ADDR:%.*]], align 8
+; CHECK-NEXT: store <2 x i32> [[ADD]], ptr [[DST_ADDR:%.*]], align 8
; CHECK-NEXT: ret void
;
%val = trunc <2 x i64> %src to <2 x i32>
%add = add <2 x i32> %val, <i32 1, i32 1>
- store <2 x i32> %add, <2 x i32>* %dst.addr
+ store <2 x i32> %add, ptr %dst.addr
ret void
}
%dim31 = insertelement <4 x i32> %dim30, i32 %a, i32 2
%dim32 = insertelement <4 x i32> %dim31, i32 %a, i32 3
- %offset_ptr = getelementptr <4 x float>, <4 x float>* null, i32 1
- %offset_int = ptrtoint <4 x float>* %offset_ptr to i64
+ %offset_ptr = getelementptr <4 x float>, ptr null, i32 1
+ %offset_int = ptrtoint ptr %offset_ptr to i64
%sizeof32 = trunc i64 %offset_int to i32
%smearinsert33 = insertelement <4 x i32> poison, i32 %sizeof32, i32 0
ret <2 x i64> %conv
}
-define void @convert(<2 x i32>* %dst.addr, <2 x i64> %src) {
+define void @convert(ptr %dst.addr, <2 x i64> %src) {
; CHECK-LABEL: @convert(
; CHECK-NEXT: [[VAL:%.*]] = trunc <2 x i64> [[SRC:%.*]] to <2 x i32>
; CHECK-NEXT: [[ADD:%.*]] = add <2 x i32> [[VAL]], <i32 1, i32 1>
-; CHECK-NEXT: store <2 x i32> [[ADD]], <2 x i32>* [[DST_ADDR:%.*]], align 8
+; CHECK-NEXT: store <2 x i32> [[ADD]], ptr [[DST_ADDR:%.*]], align 8
; CHECK-NEXT: ret void
;
%val = trunc <2 x i64> %src to <2 x i32>
%add = add <2 x i32> %val, <i32 1, i32 1>
- store <2 x i32> %add, <2 x i32>* %dst.addr
+ store <2 x i32> %add, ptr %dst.addr
ret void
}
%dim31 = insertelement <4 x i32> %dim30, i32 %a, i32 2
%dim32 = insertelement <4 x i32> %dim31, i32 %a, i32 3
- %offset_ptr = getelementptr <4 x float>, <4 x float>* null, i32 1
- %offset_int = ptrtoint <4 x float>* %offset_ptr to i64
+ %offset_ptr = getelementptr <4 x float>, ptr null, i32 1
+ %offset_int = ptrtoint ptr %offset_ptr to i64
%sizeof32 = trunc i64 %offset_int to i32
%smearinsert33 = insertelement <4 x i32> undef, i32 %sizeof32, i32 0
@G1 = global i8 zeroinitializer
-define <2 x i1> @test(<2 x i8*> %a, <2 x i8*> %b) {
+define <2 x i1> @test(<2 x ptr> %a, <2 x ptr> %b) {
; CHECK-LABEL: @test(
-; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i8*> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x ptr> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <2 x i1> [[C]]
;
- %c = icmp eq <2 x i8*> %a, %b
+ %c = icmp eq <2 x ptr> %a, %b
ret <2 x i1> %c
}
-define <2 x i1> @test2(<2 x i8*> %a) {
+define <2 x i1> @test2(<2 x ptr> %a) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: ret <2 x i1> zeroinitializer
;
- %c = inttoptr <2 x i32> <i32 1, i32 2> to <2 x i8*>
- %d = icmp ult <2 x i8*> %c, zeroinitializer
+ %c = inttoptr <2 x i32> <i32 1, i32 2> to <2 x ptr>
+ %d = icmp ult <2 x ptr> %c, zeroinitializer
ret <2 x i1> %d
}
-define <2 x i1> @test3(<2 x i8*> %a) {
+define <2 x i1> @test3(<2 x ptr> %a) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: ret <2 x i1> zeroinitializer
;
- %g = getelementptr i8, <2 x i8*> %a, <2 x i32> <i32 1, i32 0>
- %B = icmp ult <2 x i8*> %g, zeroinitializer
+ %g = getelementptr i8, <2 x ptr> %a, <2 x i32> <i32 1, i32 0>
+ %B = icmp ult <2 x ptr> %g, zeroinitializer
ret <2 x i1> %B
}
-define <1 x i1> @test4(<1 x i8*> %a) {
+define <1 x i1> @test4(<1 x ptr> %a) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: ret <1 x i1> zeroinitializer
;
- %g = getelementptr i8, <1 x i8*> %a, <1 x i32> <i32 1>
- %B = icmp ult <1 x i8*> %g, zeroinitializer
+ %g = getelementptr i8, <1 x ptr> %a, <1 x i32> <i32 1>
+ %B = icmp ult <1 x ptr> %g, zeroinitializer
ret <1 x i1> %B
}
-define <2 x i1> @test5(<2 x i8*> %a) {
+define <2 x i1> @test5(<2 x ptr> %a) {
; CHECK-LABEL: @test5(
; CHECK-NEXT: ret <2 x i1> zeroinitializer
;
- %w = getelementptr i8, <2 x i8*> %a, <2 x i32> zeroinitializer
- %e = getelementptr i8, <2 x i8*> %w, <2 x i32> <i32 5, i32 9>
- %g = getelementptr i8, <2 x i8*> %e, <2 x i32> <i32 1, i32 0>
- %B = icmp ult <2 x i8*> %g, zeroinitializer
+ %w = getelementptr i8, <2 x ptr> %a, <2 x i32> zeroinitializer
+ %e = getelementptr i8, <2 x ptr> %w, <2 x i32> <i32 5, i32 9>
+ %g = getelementptr i8, <2 x ptr> %e, <2 x i32> <i32 1, i32 0>
+ %B = icmp ult <2 x ptr> %g, zeroinitializer
ret <2 x i1> %B
}
-define <2 x i32*> @test7(<2 x {i32, i32}*> %a) {
+define <2 x ptr> @test7(<2 x ptr> %a) {
; CHECK-LABEL: @test7(
-; CHECK-NEXT: [[W:%.*]] = getelementptr { i32, i32 }, <2 x { i32, i32 }*> [[A:%.*]], <2 x i64> <i64 5, i64 9>, <2 x i32> zeroinitializer
-; CHECK-NEXT: ret <2 x i32*> [[W]]
+; CHECK-NEXT: [[W:%.*]] = getelementptr { i32, i32 }, <2 x ptr> [[A:%.*]], <2 x i64> <i64 5, i64 9>, <2 x i32> zeroinitializer
+; CHECK-NEXT: ret <2 x ptr> [[W]]
;
- %w = getelementptr {i32, i32}, <2 x {i32, i32}*> %a, <2 x i32> <i32 5, i32 9>, <2 x i32> zeroinitializer
- ret <2 x i32*> %w
+ %w = getelementptr {i32, i32}, <2 x ptr> %a, <2 x i32> <i32 5, i32 9>, <2 x i32> zeroinitializer
+ ret <2 x ptr> %w
}
define <vscale x 2 x i1> @test8() {
;
%ins = insertelement <vscale x 2 x i32> poison, i32 1, i32 0
%b = shufflevector <vscale x 2 x i32> %ins, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
- %c = inttoptr <vscale x 2 x i32> %b to <vscale x 2 x i8*>
- %d = icmp ult <vscale x 2 x i8*> %c, zeroinitializer
+ %c = inttoptr <vscale x 2 x i32> %b to <vscale x 2 x ptr>
+ %d = icmp ult <vscale x 2 x ptr> %c, zeroinitializer
ret <vscale x 2 x i1> %d
}
@G1 = global i8 zeroinitializer
-define <2 x i1> @test(<2 x i8*> %a, <2 x i8*> %b) {
+define <2 x i1> @test(<2 x ptr> %a, <2 x ptr> %b) {
; CHECK-LABEL: @test(
-; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i8*> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x ptr> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <2 x i1> [[C]]
;
- %c = icmp eq <2 x i8*> %a, %b
+ %c = icmp eq <2 x ptr> %a, %b
ret <2 x i1> %c
}
-define <2 x i1> @test2(<2 x i8*> %a) {
+define <2 x i1> @test2(<2 x ptr> %a) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: ret <2 x i1> zeroinitializer
;
- %c = inttoptr <2 x i32> <i32 1, i32 2> to <2 x i8*>
- %d = icmp ult <2 x i8*> %c, zeroinitializer
+ %c = inttoptr <2 x i32> <i32 1, i32 2> to <2 x ptr>
+ %d = icmp ult <2 x ptr> %c, zeroinitializer
ret <2 x i1> %d
}
-define <2 x i1> @test3(<2 x i8*> %a) {
+define <2 x i1> @test3(<2 x ptr> %a) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: ret <2 x i1> zeroinitializer
;
- %g = getelementptr i8, <2 x i8*> %a, <2 x i32> <i32 1, i32 0>
- %B = icmp ult <2 x i8*> %g, zeroinitializer
+ %g = getelementptr i8, <2 x ptr> %a, <2 x i32> <i32 1, i32 0>
+ %B = icmp ult <2 x ptr> %g, zeroinitializer
ret <2 x i1> %B
}
-define <1 x i1> @test4(<1 x i8*> %a) {
+define <1 x i1> @test4(<1 x ptr> %a) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: ret <1 x i1> zeroinitializer
;
- %g = getelementptr i8, <1 x i8*> %a, <1 x i32> <i32 1>
- %B = icmp ult <1 x i8*> %g, zeroinitializer
+ %g = getelementptr i8, <1 x ptr> %a, <1 x i32> <i32 1>
+ %B = icmp ult <1 x ptr> %g, zeroinitializer
ret <1 x i1> %B
}
-define <2 x i1> @test5(<2 x i8*> %a) {
+define <2 x i1> @test5(<2 x ptr> %a) {
; CHECK-LABEL: @test5(
; CHECK-NEXT: ret <2 x i1> zeroinitializer
;
- %w = getelementptr i8, <2 x i8*> %a, <2 x i32> zeroinitializer
- %e = getelementptr i8, <2 x i8*> %w, <2 x i32> <i32 5, i32 9>
- %g = getelementptr i8, <2 x i8*> %e, <2 x i32> <i32 1, i32 0>
- %B = icmp ult <2 x i8*> %g, zeroinitializer
+ %w = getelementptr i8, <2 x ptr> %a, <2 x i32> zeroinitializer
+ %e = getelementptr i8, <2 x ptr> %w, <2 x i32> <i32 5, i32 9>
+ %g = getelementptr i8, <2 x ptr> %e, <2 x i32> <i32 1, i32 0>
+ %B = icmp ult <2 x ptr> %g, zeroinitializer
ret <2 x i1> %B
}
-define <2 x i32*> @test7(<2 x {i32, i32}*> %a) {
+define <2 x ptr> @test7(<2 x ptr> %a) {
; CHECK-LABEL: @test7(
-; CHECK-NEXT: [[W:%.*]] = getelementptr { i32, i32 }, <2 x { i32, i32 }*> [[A:%.*]], <2 x i64> <i64 5, i64 9>, <2 x i32> zeroinitializer
-; CHECK-NEXT: ret <2 x i32*> [[W]]
+; CHECK-NEXT: [[W:%.*]] = getelementptr { i32, i32 }, <2 x ptr> [[A:%.*]], <2 x i64> <i64 5, i64 9>, <2 x i32> zeroinitializer
+; CHECK-NEXT: ret <2 x ptr> [[W]]
;
- %w = getelementptr {i32, i32}, <2 x {i32, i32}*> %a, <2 x i32> <i32 5, i32 9>, <2 x i32> zeroinitializer
- ret <2 x i32*> %w
+ %w = getelementptr {i32, i32}, <2 x ptr> %a, <2 x i32> <i32 5, i32 9>, <2 x i32> zeroinitializer
+ ret <2 x ptr> %w
}
define <vscale x 2 x i1> @test8() {
;
%ins = insertelement <vscale x 2 x i32> undef, i32 1, i32 0
%b = shufflevector <vscale x 2 x i32> %ins, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
- %c = inttoptr <vscale x 2 x i32> %b to <vscale x 2 x i8*>
- %d = icmp ult <vscale x 2 x i8*> %c, zeroinitializer
+ %c = inttoptr <vscale x 2 x i32> %b to <vscale x 2 x ptr>
+ %d = icmp ult <vscale x 2 x ptr> %c, zeroinitializer
ret <vscale x 2 x i1> %d
}
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-define <2 x i8*> @testa(<2 x i8*> %a) {
+define <2 x ptr> @testa(<2 x ptr> %a) {
; CHECK-LABEL: @testa(
-; CHECK-NEXT: [[G:%.*]] = getelementptr i8, <2 x i8*> [[A:%.*]], <2 x i64> <i64 0, i64 1>
-; CHECK-NEXT: ret <2 x i8*> [[G]]
+; CHECK-NEXT: [[G:%.*]] = getelementptr i8, <2 x ptr> [[A:%.*]], <2 x i64> <i64 0, i64 1>
+; CHECK-NEXT: ret <2 x ptr> [[G]]
;
- %g = getelementptr i8, <2 x i8*> %a, <2 x i32> <i32 0, i32 1>
- ret <2 x i8*> %g
+ %g = getelementptr i8, <2 x ptr> %a, <2 x i32> <i32 0, i32 1>
+ ret <2 x ptr> %g
}
-define <8 x double*> @vgep_s_v8i64(double* %a, <8 x i64>%i) {
+define <8 x ptr> @vgep_s_v8i64(ptr %a, <8 x i64>%i) {
; CHECK-LABEL: @vgep_s_v8i64(
-; CHECK-NEXT: [[VECTORGEP:%.*]] = getelementptr double, double* [[A:%.*]], <8 x i64> [[I:%.*]]
-; CHECK-NEXT: ret <8 x double*> [[VECTORGEP]]
+; CHECK-NEXT: [[VECTORGEP:%.*]] = getelementptr double, ptr [[A:%.*]], <8 x i64> [[I:%.*]]
+; CHECK-NEXT: ret <8 x ptr> [[VECTORGEP]]
;
- %VectorGep = getelementptr double, double* %a, <8 x i64> %i
- ret <8 x double*> %VectorGep
+ %VectorGep = getelementptr double, ptr %a, <8 x i64> %i
+ ret <8 x ptr> %VectorGep
}
-define <8 x double*> @vgep_s_v8i32(double* %a, <8 x i32>%i) {
+define <8 x ptr> @vgep_s_v8i32(ptr %a, <8 x i32>%i) {
; CHECK-LABEL: @vgep_s_v8i32(
; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i32> [[I:%.*]] to <8 x i64>
-; CHECK-NEXT: [[VECTORGEP:%.*]] = getelementptr double, double* [[A:%.*]], <8 x i64> [[TMP1]]
-; CHECK-NEXT: ret <8 x double*> [[VECTORGEP]]
+; CHECK-NEXT: [[VECTORGEP:%.*]] = getelementptr double, ptr [[A:%.*]], <8 x i64> [[TMP1]]
+; CHECK-NEXT: ret <8 x ptr> [[VECTORGEP]]
;
- %VectorGep = getelementptr double, double* %a, <8 x i32> %i
- ret <8 x double*> %VectorGep
+ %VectorGep = getelementptr double, ptr %a, <8 x i32> %i
+ ret <8 x ptr> %VectorGep
}
-define <8 x i8*> @vgep_v8iPtr_i32(<8 x i8*> %a, i32 %i) {
+define <8 x ptr> @vgep_v8iPtr_i32(<8 x ptr> %a, i32 %i) {
; CHECK-LABEL: @vgep_v8iPtr_i32(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[I:%.*]] to i64
-; CHECK-NEXT: [[VECTORGEP:%.*]] = getelementptr i8, <8 x i8*> [[A:%.*]], i64 [[TMP1]]
-; CHECK-NEXT: ret <8 x i8*> [[VECTORGEP]]
+; CHECK-NEXT: [[VECTORGEP:%.*]] = getelementptr i8, <8 x ptr> [[A:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret <8 x ptr> [[VECTORGEP]]
;
- %VectorGep = getelementptr i8, <8 x i8*> %a, i32 %i
- ret <8 x i8*> %VectorGep
+ %VectorGep = getelementptr i8, <8 x ptr> %a, i32 %i
+ ret <8 x ptr> %VectorGep
}
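; A hedged extra example along the same lines, not part of the original file
; (@vgep_s_v8i16 is a hypothetical name): with the 64-bit pointer width in
; this datalayout, a 16-bit index should likewise be sign-extended to the
; i64 index type before the vector GEP is formed.
define <8 x ptr> @vgep_s_v8i16(ptr %a, <8 x i16> %i) {
  %g = getelementptr double, ptr %a, <8 x i16> %i   ; narrow vector index
  ret <8 x ptr> %g
}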
; See llvm.org/D75644 and llvm.org/D75505
target datalayout = "e-p:64:64-i32:32:32-i64:64:64-f32:32:32-f64:64:64"
-define float @float_load(i32* %addr) {
+define float @float_load(ptr %addr) {
; CHECK-LABEL: @float_load(
-; CHECK: %i32 = load volatile i32, i32* %addr, align 4
+; CHECK: %i32 = load volatile i32, ptr %addr, align 4
; CHECK-NEXT: %float = bitcast i32 %i32 to float
; CHECK-NEXT: ret float %float
- %i32 = load volatile i32, i32* %addr, align 4
+ %i32 = load volatile i32, ptr %addr, align 4
%float = bitcast i32 %i32 to float
ret float %float
}
-define i32 @i32_load(float* %addr) {
+define i32 @i32_load(ptr %addr) {
; CHECK-LABEL: @i32_load(
-; CHECK: %float = load volatile float, float* %addr, align 4
+; CHECK: %float = load volatile float, ptr %addr, align 4
; CHECK-NEXT: %i32 = bitcast float %float to i32
; CHECK-NEXT: ret i32 %i32
- %float = load volatile float, float* %addr, align 4
+ %float = load volatile float, ptr %addr, align 4
%i32 = bitcast float %float to i32
ret i32 %i32
}
-define double @double_load(i64* %addr) {
+define double @double_load(ptr %addr) {
; CHECK-LABEL: @double_load(
-; CHECK: %i64 = load volatile i64, i64* %addr, align 8
+; CHECK: %i64 = load volatile i64, ptr %addr, align 8
; CHECK-NEXT: %double = bitcast i64 %i64 to double
; CHECK-NEXT: ret double %double
- %i64 = load volatile i64, i64* %addr, align 8
+ %i64 = load volatile i64, ptr %addr, align 8
%double = bitcast i64 %i64 to double
ret double %double
}
-define i64 @i64_load(double* %addr) {
+define i64 @i64_load(ptr %addr) {
; CHECK-LABEL: @i64_load(
-; CHECK: %double = load volatile double, double* %addr, align 8
+; CHECK: %double = load volatile double, ptr %addr, align 8
; CHECK-NEXT: %i64 = bitcast double %double to i64
; CHECK-NEXT: ret i64 %i64
- %double = load volatile double, double* %addr, align 8
+ %double = load volatile double, ptr %addr, align 8
%i64 = bitcast double %double to i64
ret i64 %i64
}
-define i8* @ptr_load(i64* %addr) {
+define ptr @ptr_load(ptr %addr) {
; CHECK-LABEL: @ptr_load(
-; CHECK: %i64 = load volatile i64, i64* %addr, align 8
-; CHECK-NEXT: %ptr = inttoptr i64 %i64 to i8*
-; CHECK-NEXT: ret i8* %ptr
- %i64 = load volatile i64, i64* %addr, align 8
- %ptr = inttoptr i64 %i64 to i8*
- ret i8* %ptr
+; CHECK: %i64 = load volatile i64, ptr %addr, align 8
+; CHECK-NEXT: %ptr = inttoptr i64 %i64 to ptr
+; CHECK-NEXT: ret ptr %ptr
+ %i64 = load volatile i64, ptr %addr, align 8
+ %ptr = inttoptr i64 %i64 to ptr
+ ret ptr %ptr
}
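; A hedged note on the pattern above, not from the original file: instcombine
; must not change the width or type of a volatile access, so each fold keeps
; the volatile load at its original type and converts the loaded value
; afterwards (bitcast for the int<->float cases, inttoptr for the pointer case).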
define void @self_assign_1() {
; CHECK-LABEL: @self_assign_1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP:%.*]] = load volatile i32, i32* @x, align 4
-; CHECK-NEXT: store volatile i32 [[TMP]], i32* @x, align 4
+; CHECK-NEXT: [[TMP:%.*]] = load volatile i32, ptr @x, align 4
+; CHECK-NEXT: store volatile i32 [[TMP]], ptr @x, align 4
; CHECK-NEXT: br label [[RETURN:%.*]]
; CHECK: return:
; CHECK-NEXT: ret void
;
entry:
- %tmp = load volatile i32, i32* @x
- store volatile i32 %tmp, i32* @x
+ %tmp = load volatile i32, ptr @x
+ store volatile i32 %tmp, ptr @x
br label %return
return:
ret void
}
-define void @volatile_store_before_unreachable(i1 %c, i8* %p) {
+define void @volatile_store_before_unreachable(i1 %c, ptr %p) {
; CHECK-LABEL: @volatile_store_before_unreachable(
; CHECK-NEXT: br i1 [[C:%.*]], label [[TRUE:%.*]], label [[FALSE:%.*]]
; CHECK: true:
-; CHECK-NEXT: store volatile i8 0, i8* [[P:%.*]], align 1
+; CHECK-NEXT: store volatile i8 0, ptr [[P:%.*]], align 1
; CHECK-NEXT: unreachable
; CHECK: false:
; CHECK-NEXT: ret void
br i1 %c, label %true, label %false
true:
- store volatile i8 0, i8* %p
+ store volatile i8 0, ptr %p
unreachable
false:
  ret void
}
; CHECK-NEXT: ret <vscale x 4 x i32> [[Z:%.*]]
;
%a = alloca <vscale x 4 x i32>
- store <vscale x 4 x i32> %z, <vscale x 4 x i32>* %a
- %load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %a
+ store <vscale x 4 x i32> %z, ptr %a
+ %load = load <vscale x 4 x i32>, ptr %a
ret <vscale x 4 x i32> %load
}
; CHECK-NEXT: ret void
;
%a = alloca <vscale x 4 x i32>
- store <vscale x 4 x i32> %z, <vscale x 4 x i32>* %a
+ store <vscale x 4 x i32> %z, ptr %a
ret void
}
; CHECK-LABEL: @alloca_zero_byte_move_first_inst(
; CHECK-NEXT: [[B:%.*]] = alloca {}, align 8
; CHECK-NEXT: [[A:%.*]] = alloca <vscale x 16 x i8>, align 16
-; CHECK-NEXT: call void (...) @use(<vscale x 16 x i8>* nonnull [[A]])
-; CHECK-NEXT: call void (...) @use({}* nonnull [[B]])
+; CHECK-NEXT: call void (...) @use(ptr nonnull [[A]])
+; CHECK-NEXT: call void (...) @use(ptr nonnull [[B]])
; CHECK-NEXT: ret void
;
%a = alloca <vscale x 16 x i8>
- call void (...) @use( <vscale x 16 x i8>* %a )
+ call void (...) @use( ptr %a )
%b = alloca { }
- call void (...) @use( { }* %b )
+ call void (...) @use( ptr %b )
ret void
}
ret <vscale x 2 x i1> %cmp
}
-define <vscale x 2 x i1> @gep_scalevector1(i32* %X) nounwind {
+define <vscale x 2 x i1> @gep_scalevector1(ptr %X) nounwind {
; CHECK-LABEL: @gep_scalevector1(
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i32*> poison, i32* [[X:%.*]], i64 0
-; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <vscale x 2 x i32*> [[DOTSPLATINSERT]], zeroinitializer
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x ptr> poison, ptr [[X:%.*]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <vscale x 2 x ptr> [[DOTSPLATINSERT]], zeroinitializer
; CHECK-NEXT: [[C:%.*]] = shufflevector <vscale x 2 x i1> [[TMP1]], <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: ret <vscale x 2 x i1> [[C]]
;
- %A = getelementptr inbounds i32, i32* %X, <vscale x 2 x i64> zeroinitializer
- %C = icmp eq <vscale x 2 x i32*> %A, zeroinitializer
+ %A = getelementptr inbounds i32, ptr %X, <vscale x 2 x i64> zeroinitializer
+ %C = icmp eq <vscale x 2 x ptr> %A, zeroinitializer
ret <vscale x 2 x i1> %C
}
ret i1 %res
}
-define i64* @ext_lane_from_bitcast_of_splat(i32* %v) {
+define ptr @ext_lane_from_bitcast_of_splat(ptr %v) {
; CHECK-LABEL: @ext_lane_from_bitcast_of_splat(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[R:%.*]] = bitcast i32* [[V:%.*]] to i64*
-; CHECK-NEXT: ret i64* [[R]]
+; CHECK-NEXT: ret ptr [[V:%.*]]
;
entry:
- %in = insertelement <vscale x 4 x i32*> poison, i32* %v, i32 0
- %splat = shufflevector <vscale x 4 x i32*> %in, <vscale x 4 x i32*> poison, <vscale x 4 x i32> zeroinitializer
- %bc = bitcast <vscale x 4 x i32*> %splat to <vscale x 4 x i64*>
- %r = extractelement <vscale x 4 x i64*> %bc, i32 3
- ret i64* %r
+ %in = insertelement <vscale x 4 x ptr> poison, ptr %v, i32 0
+ %splat = shufflevector <vscale x 4 x ptr> %in, <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer
+ %bc = bitcast <vscale x 4 x ptr> %splat to <vscale x 4 x ptr>
+ %r = extractelement <vscale x 4 x ptr> %bc, i32 3
+ ret ptr %r
}
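; A hedged recap, not part of the original test: every lane of the splat holds
; %v, and with opaque pointers the vector bitcast is a no-op, so extracting
; any lane (even lane 3 of a scalable vector) folds directly to %v.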
declare <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
; RUN: opt -S -passes=instcombine < %s | FileCheck %s
; This test verifies that we do not crash on the assertion `CastInst::castIsValid(opc, C, Ty) && "Invalid constantexpr cast!"`.
-define <vscale x 2 x i8*> @gep_index_type_is_scalable(i8* %p) {
+define <vscale x 2 x ptr> @gep_index_type_is_scalable(ptr %p) {
; CHECK-LABEL: @gep_index_type_is_scalable(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, i8* [[P:%.*]], <vscale x 2 x i64> undef
-; CHECK-NEXT: ret <vscale x 2 x i8*> [[GEP]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[P:%.*]], <vscale x 2 x i64> undef
+; CHECK-NEXT: ret <vscale x 2 x ptr> [[GEP]]
;
- %gep = getelementptr i8, i8* %p, <vscale x 2 x i64> undef
- ret <vscale x 2 x i8*> %gep
+ %gep = getelementptr i8, ptr %p, <vscale x 2 x i64> undef
+ ret <vscale x 2 x ptr> %gep
}
; This test exercises the code path guarded by "GEP.getNumIndices() == 1".
-define <vscale x 4 x i32>* @gep_num_of_indices_1(<vscale x 4 x i32>* %p) {
+define ptr @gep_num_of_indices_1(ptr %p) {
; CHECK-LABEL: @gep_num_of_indices_1(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* [[P:%.*]], i64 1
-; CHECK-NEXT: ret <vscale x 4 x i32>* [[GEP]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr <vscale x 4 x i32>, ptr [[P:%.*]], i64 1
+; CHECK-NEXT: ret ptr [[GEP]]
;
- %gep = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %p, i64 1
- ret <vscale x 4 x i32>* %gep
+ %gep = getelementptr <vscale x 4 x i32>, ptr %p, i64 1
+ ret ptr %gep
}
; This test exercises the code path guarded by "GEP.getNumOperands() == 2".
-define void @gep_bitcast(i8* %p) {
+define void @gep_bitcast(ptr %p) {
; CHECK-LABEL: @gep_bitcast(
-; CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[P:%.*]] to <vscale x 16 x i8>*
-; CHECK-NEXT: store <vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8>* [[CAST]], align 16
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* [[CAST]], i64 1
-; CHECK-NEXT: store <vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8>* [[GEP2]], align 16
+; CHECK-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[P:%.*]], align 16
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr <vscale x 16 x i8>, ptr [[P]], i64 1
+; CHECK-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[GEP2]], align 16
; CHECK-NEXT: ret void
;
- %cast = bitcast i8* %p to <vscale x 16 x i8>*
- %gep1 = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %cast, i64 0
- store <vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8>* %gep1
- %gep2 = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %cast, i64 1
- store <vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8>* %gep2
+ store <vscale x 16 x i8> zeroinitializer, ptr %p
+ %gep2 = getelementptr <vscale x 16 x i8>, ptr %p, i64 1
+ store <vscale x 16 x i8> zeroinitializer, ptr %gep2
ret void
}
define i32 @gep_alloca_inbounds_vscale_zero() {
; CHECK-LABEL: @gep_alloca_inbounds_vscale_zero(
; CHECK-NEXT: [[A:%.*]] = alloca <vscale x 4 x i32>, align 16
-; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds <vscale x 4 x i32>, <vscale x 4 x i32>* [[A]], i64 0, i64 2
-; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[TMP]], align 8
+; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds <vscale x 4 x i32>, ptr [[A]], i64 0, i64 2
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[TMP]], align 8
; CHECK-NEXT: ret i32 [[LOAD]]
;
%a = alloca <vscale x 4 x i32>
- %tmp = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %a, i32 0, i32 2
- %load = load i32, i32* %tmp
+ %tmp = getelementptr <vscale x 4 x i32>, ptr %a, i32 0, i32 2
+ %load = load i32, ptr %tmp
ret i32 %load
}
define i32 @gep_alloca_inbounds_vscale_nonzero() {
; CHECK-LABEL: @gep_alloca_inbounds_vscale_nonzero(
; CHECK-NEXT: [[A:%.*]] = alloca <vscale x 4 x i32>, align 16
-; CHECK-NEXT: [[TMP:%.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* [[A]], i64 1, i64 2
-; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[TMP]], align 8
+; CHECK-NEXT: [[TMP:%.*]] = getelementptr <vscale x 4 x i32>, ptr [[A]], i64 1, i64 2
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[TMP]], align 8
; CHECK-NEXT: ret i32 [[LOAD]]
;
%a = alloca <vscale x 4 x i32>
- %tmp = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %a, i32 1, i32 2
- %load = load i32, i32* %tmp
+ %tmp = getelementptr <vscale x 4 x i32>, ptr %a, i32 1, i32 2
+ %load = load i32, ptr %tmp
ret i32 %load
}
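; A hedged reading of the pair above, not from the original file: with a
; leading index of 0 the element offset (8 bytes here) is provably inside the
; single <vscale x 4 x i32> alloca for any vscale, so the gep can be marked
; inbounds; with a leading index of 1 the byte offset scales with vscale, so
; inbounds is not added.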
; CHECK-NEXT: [[IN:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[V:%.*]], i64 0
; CHECK-NEXT: [[SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[IN]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[I1:%.*]] = insertelement <vscale x 4 x i32> [[SPLAT]], i32 undef, i64 128
-; CHECK-NEXT: store <vscale x 4 x i32> [[I1]], <vscale x 4 x i32>* undef, align 16
+; CHECK-NEXT: store <vscale x 4 x i32> [[I1]], ptr undef, align 16
; CHECK-NEXT: ret void
;
%in = insertelement <vscale x 4 x i32> poison, i32 %v, i32 0
%splat = shufflevector <vscale x 4 x i32> %in, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%I1 = insertelement <vscale x 4 x i32> %splat, i32 undef, i8 -128
- store <vscale x 4 x i32> %I1, <vscale x 4 x i32>* undef, align 16
+ store <vscale x 4 x i32> %I1, ptr undef, align 16
ret void
}
; CHECK-NEXT: [[IN:%.*]] = insertelement <vscale x 4 x i32> undef, i32 [[V:%.*]], i64 0
; CHECK-NEXT: [[SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[IN]], <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[I1:%.*]] = insertelement <vscale x 4 x i32> [[SPLAT]], i32 undef, i64 128
-; CHECK-NEXT: store <vscale x 4 x i32> [[I1]], <vscale x 4 x i32>* undef, align 16
+; CHECK-NEXT: store <vscale x 4 x i32> [[I1]], ptr undef, align 16
; CHECK-NEXT: ret void
;
%in = insertelement <vscale x 4 x i32> undef, i32 %v, i32 0
%splat = shufflevector <vscale x 4 x i32> %in, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%I1 = insertelement <vscale x 4 x i32> %splat, i32 undef, i8 -128
- store <vscale x 4 x i32> %I1, <vscale x 4 x i32>* undef, align 16
+ store <vscale x 4 x i32> %I1, ptr undef, align 16
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine,verify -S | FileCheck %s
-define <2 x i8> @constprop_load_bitcast(<vscale x 16 x i1>* %ptr) {
+define <2 x i8> @constprop_load_bitcast(ptr %ptr) {
; CHECK-LABEL: @constprop_load_bitcast(
-; CHECK-NEXT: store <vscale x 16 x i1> zeroinitializer, <vscale x 16 x i1>* [[PTR:%.*]], align 16
+; CHECK-NEXT: store <vscale x 16 x i1> zeroinitializer, ptr [[PTR:%.*]], align 16
; CHECK-NEXT: ret <2 x i8> zeroinitializer
;
- store <vscale x 16 x i1> zeroinitializer, <vscale x 16 x i1>* %ptr, align 16
- %cast_to_fixed = bitcast <vscale x 16 x i1>* %ptr to <2 x i8>*
- %a = load <2 x i8>, <2 x i8>* %cast_to_fixed, align 16
+ store <vscale x 16 x i1> zeroinitializer, ptr %ptr, align 16
+ %a = load <2 x i8>, ptr %ptr, align 16
ret <2 x i8> %a
}
; Negative test: a vscale-sized vector store is not guaranteed to fill the fixed-size destination, so the load cannot be folded.
-define <8 x i8> @constprop_load_bitcast_neg(<vscale x 16 x i1>* %ptr) {
+define <8 x i8> @constprop_load_bitcast_neg(ptr %ptr) {
; CHECK-LABEL: @constprop_load_bitcast_neg(
-; CHECK-NEXT: store <vscale x 16 x i1> zeroinitializer, <vscale x 16 x i1>* [[PTR:%.*]], align 16
-; CHECK-NEXT: [[CAST_TO_FIXED:%.*]] = bitcast <vscale x 16 x i1>* [[PTR]] to <8 x i8>*
-; CHECK-NEXT: [[A:%.*]] = load <8 x i8>, <8 x i8>* [[CAST_TO_FIXED]], align 16
+; CHECK-NEXT: store <vscale x 16 x i1> zeroinitializer, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: [[A:%.*]] = load <8 x i8>, ptr [[PTR]], align 16
; CHECK-NEXT: ret <8 x i8> [[A]]
;
- store <vscale x 16 x i1> zeroinitializer, <vscale x 16 x i1>* %ptr, align 16
- %cast_to_fixed = bitcast <vscale x 16 x i1>* %ptr to <8 x i8>*
- %a = load <8 x i8>, <8 x i8>* %cast_to_fixed, align 16
+ store <vscale x 16 x i1> zeroinitializer, ptr %ptr, align 16
+ %a = load <8 x i8>, ptr %ptr, align 16
ret <8 x i8> %a
}
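; A hedged worked check of the pair above, not part of the original tests:
; <vscale x 16 x i1> occupies vscale * 2 bytes, so with vscale >= 1 the store
; is known to zero at least 2 bytes and the <2 x i8> load folds to
; zeroinitializer, while the 8-byte <8 x i8> load is only covered when
; vscale >= 4 and therefore cannot be folded.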
@hello = constant [6 x i32] [i32 104, i32 101, i32 108, i32 108, i32 111, i32 0]
-declare i64 @wcslen(i32*, i32)
+declare i64 @wcslen(ptr, i32)
define i64 @test_no_simplify1() {
; CHECK-LABEL: @test_no_simplify1(
- %hello_p = getelementptr [6 x i32], [6 x i32]* @hello, i64 0, i64 0
- %hello_l = call i64 @wcslen(i32* %hello_p, i32 187)
+ %hello_l = call i64 @wcslen(ptr @hello, i32 187)
; CHECK-NEXT: %hello_l = call i64 @wcslen
ret i64 %hello_l
; CHECK-NEXT: ret i64 %hello_l
; CHECK: ret i32 %temp1
entry:
- %str1 = getelementptr inbounds [2 x i8], [2 x i8]* @fake_init, i64 0, i64 0
- %str2 = getelementptr inbounds [2 x i8], [2 x i8]* @.str, i64 0, i64 0
- %temp1 = call i32 @strcmp(i8* %str1, i8* %str2) nounwind readonly
+ %temp1 = call i32 @strcmp(ptr @fake_init, ptr @.str) nounwind readonly
ret i32 %temp1
}
; CHECK: ret i32 0
entry:
- %str1 = getelementptr inbounds [2 x i8], [2 x i8]* @real_init, i64 0, i64 0
- %str2 = getelementptr inbounds [2 x i8], [2 x i8]* @.str, i64 0, i64 0
- %temp1 = call i32 @strcmp(i8* %str1, i8* %str2) nounwind readonly
+ %temp1 = call i32 @strcmp(ptr @real_init, ptr @.str) nounwind readonly
ret i32 %temp1
}
-declare i32 @strcmp(i8*, i8*) nounwind readonly
+declare i32 @strcmp(ptr, ptr) nounwind readonly
ret i8 %y
}
-define i8 @uaddtest2(i8 %A, i8 %B, i1* %overflowPtr) {
+define i8 @uaddtest2(i8 %A, i8 %B, ptr %overflowPtr) {
; CHECK-LABEL: @uaddtest2(
; CHECK-NEXT: [[AND_A:%.*]] = and i8 [[A:%.*]], 127
; CHECK-NEXT: [[AND_B:%.*]] = and i8 [[B:%.*]], 127
; CHECK-NEXT: [[X:%.*]] = add nuw i8 [[AND_A]], [[AND_B]]
-; CHECK-NEXT: store i1 false, i1* [[OVERFLOWPTR:%.*]], align 1
+; CHECK-NEXT: store i1 false, ptr [[OVERFLOWPTR:%.*]], align 1
; CHECK-NEXT: ret i8 [[X]]
;
%and.A = and i8 %A, 127
%x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %and.A, i8 %and.B)
%y = extractvalue { i8, i1 } %x, 0
%z = extractvalue { i8, i1 } %x, 1
- store i1 %z, i1* %overflowPtr
+ store i1 %z, ptr %overflowPtr
ret i8 %y
}
-define i8 @uaddtest3(i8 %A, i8 %B, i1* %overflowPtr) {
+define i8 @uaddtest3(i8 %A, i8 %B, ptr %overflowPtr) {
; CHECK-LABEL: @uaddtest3(
; CHECK-NEXT: [[OR_A:%.*]] = or i8 [[A:%.*]], -128
; CHECK-NEXT: [[OR_B:%.*]] = or i8 [[B:%.*]], -128
; CHECK-NEXT: [[X:%.*]] = add i8 [[OR_A]], [[OR_B]]
-; CHECK-NEXT: store i1 true, i1* [[OVERFLOWPTR:%.*]], align 1
+; CHECK-NEXT: store i1 true, ptr [[OVERFLOWPTR:%.*]], align 1
; CHECK-NEXT: ret i8 [[X]]
;
%or.A = or i8 %A, -128
%x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %or.A, i8 %or.B)
%y = extractvalue { i8, i1 } %x, 0
%z = extractvalue { i8, i1 } %x, 1
- store i1 %z, i1* %overflowPtr
+ store i1 %z, ptr %overflowPtr
ret i8 %y
}
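; A hedged worked check of the two folds above, not from the original file:
; masking both operands to 7 bits bounds the sum by 127 + 127 = 254 < 256, so
; the carry-out is the constant false and the add is nuw; forcing the sign bit
; on in both operands bounds the sum from below by 128 + 128 = 256, so the
; carry-out is the constant true.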
-define i8 @uaddtest4(i8 %A, i1* %overflowPtr) {
+define i8 @uaddtest4(i8 %A, ptr %overflowPtr) {
; CHECK-LABEL: @uaddtest4(
-; CHECK-NEXT: store i1 false, i1* [[OVERFLOWPTR:%.*]], align 1
+; CHECK-NEXT: store i1 false, ptr [[OVERFLOWPTR:%.*]], align 1
; CHECK-NEXT: ret i8 -1
;
%x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 undef, i8 %A)
%y = extractvalue { i8, i1 } %x, 0
%z = extractvalue { i8, i1 } %x, 1
- store i1 %z, i1* %overflowPtr
+ store i1 %z, ptr %overflowPtr
ret i8 %y
}
-define i8 @uaddtest5(i8 %A, i1* %overflowPtr) {
+define i8 @uaddtest5(i8 %A, ptr %overflowPtr) {
; CHECK-LABEL: @uaddtest5(
-; CHECK-NEXT: store i1 false, i1* [[OVERFLOWPTR:%.*]], align 1
+; CHECK-NEXT: store i1 false, ptr [[OVERFLOWPTR:%.*]], align 1
; CHECK-NEXT: ret i8 [[A:%.*]]
;
%x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 0, i8 %A)
%y = extractvalue { i8, i1 } %x, 0
%z = extractvalue { i8, i1 } %x, 1
- store i1 %z, i1* %overflowPtr
+ store i1 %z, ptr %overflowPtr
ret i8 %y
}
ret { i32, i1 } %x
}
-define i8 @umultest1(i8 %A, i1* %overflowPtr) {
+define i8 @umultest1(i8 %A, ptr %overflowPtr) {
; CHECK-LABEL: @umultest1(
-; CHECK-NEXT: store i1 false, i1* [[OVERFLOWPTR:%.*]], align 1
+; CHECK-NEXT: store i1 false, ptr [[OVERFLOWPTR:%.*]], align 1
; CHECK-NEXT: ret i8 0
;
%x = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 0, i8 %A)
%y = extractvalue { i8, i1 } %x, 0
%z = extractvalue { i8, i1 } %x, 1
- store i1 %z, i1* %overflowPtr
+ store i1 %z, ptr %overflowPtr
ret i8 %y
}
-define i8 @umultest2(i8 %A, i1* %overflowPtr) {
+define i8 @umultest2(i8 %A, ptr %overflowPtr) {
; CHECK-LABEL: @umultest2(
-; CHECK-NEXT: store i1 false, i1* [[OVERFLOWPTR:%.*]], align 1
+; CHECK-NEXT: store i1 false, ptr [[OVERFLOWPTR:%.*]], align 1
; CHECK-NEXT: ret i8 [[A:%.*]]
;
%x = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 1, i8 %A)
%y = extractvalue { i8, i1 } %x, 0
%z = extractvalue { i8, i1 } %x, 1
- store i1 %z, i1* %overflowPtr
+ store i1 %z, ptr %overflowPtr
ret i8 %y
}
ret { i32, i1 } %x
}
-define i1 @uadd_res_ult_x(i32 %x, i32 %y, i1* %p) nounwind {
+define i1 @uadd_res_ult_x(i32 %x, i32 %y, ptr %p) nounwind {
; CHECK-LABEL: @uadd_res_ult_x(
; CHECK-NEXT: [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT: [[B:%.*]] = extractvalue { i32, i1 } [[A]], 1
-; CHECK-NEXT: store i1 [[B]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[B]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[D:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT: ret i1 [[D]]
;
%a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
%b = extractvalue { i32, i1 } %a, 1
- store i1 %b, i1* %p
+ store i1 %b, ptr %p
%c = extractvalue { i32, i1 } %a, 0
%d = icmp ult i32 %c, %x
ret i1 %d
}
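; A hedged illustration of the identity being matched above, not part of the
; original file (@uadd_wrap_check is a hypothetical name): an unsigned add
; wraps exactly when the truncated sum is smaller than either operand, which
; is why the icmp on the sum can be rewritten as the intrinsic's overflow bit.
define i1 @uadd_wrap_check(i32 %x, i32 %y) {
  %s = add i32 %x, %y
  ; wraps iff x + y >= 2^32, i.e. iff the i32 sum is u< %x (equivalently u< %y)
  %wrapped = icmp ult i32 %s, %x
  ret i1 %wrapped
}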
-define i1 @uadd_res_ult_y(i32 %x, i32 %y, i1* %p) nounwind {
+define i1 @uadd_res_ult_y(i32 %x, i32 %y, ptr %p) nounwind {
; CHECK-LABEL: @uadd_res_ult_y(
; CHECK-NEXT: [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT: [[B:%.*]] = extractvalue { i32, i1 } [[A]], 1
-; CHECK-NEXT: store i1 [[B]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[B]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[D:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT: ret i1 [[D]]
;
%a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
%b = extractvalue { i32, i1 } %a, 1
- store i1 %b, i1* %p
+ store i1 %b, ptr %p
%c = extractvalue { i32, i1 } %a, 0
%d = icmp ult i32 %c, %y
ret i1 %d
}
-define i1 @uadd_res_ugt_x(i32 %xx, i32 %y, i1* %p) nounwind {
+define i1 @uadd_res_ugt_x(i32 %xx, i32 %y, ptr %p) nounwind {
; CHECK-LABEL: @uadd_res_ugt_x(
; CHECK-NEXT: [[X:%.*]] = urem i32 42, [[XX:%.*]]
; CHECK-NEXT: [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X]], i32 [[Y:%.*]])
; CHECK-NEXT: [[B:%.*]] = extractvalue { i32, i1 } [[A]], 1
-; CHECK-NEXT: store i1 [[B]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[B]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[D:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT: ret i1 [[D]]
;
%x = urem i32 42, %xx ; Thwart complexity-based canonicalization
%a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
%b = extractvalue { i32, i1 } %a, 1
- store i1 %b, i1* %p
+ store i1 %b, ptr %p
%c = extractvalue { i32, i1 } %a, 0
%d = icmp ugt i32 %x, %c
ret i1 %d
}
-define i1 @uadd_res_ugt_y(i32 %x, i32 %yy, i1* %p) nounwind {
+define i1 @uadd_res_ugt_y(i32 %x, i32 %yy, ptr %p) nounwind {
; CHECK-LABEL: @uadd_res_ugt_y(
; CHECK-NEXT: [[Y:%.*]] = urem i32 42, [[YY:%.*]]
; CHECK-NEXT: [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 [[Y]])
; CHECK-NEXT: [[B:%.*]] = extractvalue { i32, i1 } [[A]], 1
-; CHECK-NEXT: store i1 [[B]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[B]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[D:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT: ret i1 [[D]]
;
%y = urem i32 42, %yy ; Thwart complexity-based canonicalization
%a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
%b = extractvalue { i32, i1 } %a, 1
- store i1 %b, i1* %p
+ store i1 %b, ptr %p
%c = extractvalue { i32, i1 } %a, 0
%d = icmp ugt i32 %y, %c
ret i1 %d
}
-define i1 @uadd_res_ult_const(i32 %x, i1* %p) nounwind {
+define i1 @uadd_res_ult_const(i32 %x, ptr %p) nounwind {
; CHECK-LABEL: @uadd_res_ult_const(
; CHECK-NEXT: [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 42)
; CHECK-NEXT: [[B:%.*]] = extractvalue { i32, i1 } [[A]], 1
-; CHECK-NEXT: store i1 [[B]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[B]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[D:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT: ret i1 [[D]]
;
%a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 42)
%b = extractvalue { i32, i1 } %a, 1
- store i1 %b, i1* %p
+ store i1 %b, ptr %p
%c = extractvalue { i32, i1 } %a, 0
%d = icmp ult i32 %c, 42
ret i1 %d
}
-define i1 @uadd_res_ult_const_one(i32 %x, i1* %p) nounwind {
+define i1 @uadd_res_ult_const_one(i32 %x, ptr %p) nounwind {
; CHECK-LABEL: @uadd_res_ult_const_one(
; CHECK-NEXT: [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 1)
; CHECK-NEXT: [[B:%.*]] = extractvalue { i32, i1 } [[A]], 1
-; CHECK-NEXT: store i1 [[B]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[B]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[D:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT: ret i1 [[D]]
;
%a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 1)
%b = extractvalue { i32, i1 } %a, 1
- store i1 %b, i1* %p
+ store i1 %b, ptr %p
%c = extractvalue { i32, i1 } %a, 0
%d = icmp ult i32 %c, 1
ret i1 %d
}
-define i1 @uadd_res_ult_const_minus_one(i32 %x, i1* %p) nounwind {
+define i1 @uadd_res_ult_const_minus_one(i32 %x, ptr %p) nounwind {
; CHECK-LABEL: @uadd_res_ult_const_minus_one(
; CHECK-NEXT: [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 -1)
; CHECK-NEXT: [[B:%.*]] = extractvalue { i32, i1 } [[A]], 1
-; CHECK-NEXT: store i1 [[B]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[B]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[D:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT: ret i1 [[D]]
;
%a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 -1)
%b = extractvalue { i32, i1 } %a, 1
- store i1 %b, i1* %p
+ store i1 %b, ptr %p
%c = extractvalue { i32, i1 } %a, 0
%d = icmp ult i32 %c, -1
ret i1 %d
ret { <4 x i8>, <4 x i1> } %x
}
-define i8 @smul_neg1(i8 %x, i1* %p) {
+define i8 @smul_neg1(i8 %x, ptr %p) {
; CHECK-LABEL: @smul_neg1(
; CHECK-NEXT: [[R:%.*]] = sub i8 0, [[X:%.*]]
; CHECK-NEXT: [[OV:%.*]] = icmp eq i8 [[X]], -128
-; CHECK-NEXT: store i1 [[OV]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[OV]], ptr [[P:%.*]], align 1
; CHECK-NEXT: ret i8 [[R]]
;
%m = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %x, i8 -1)
%r = extractvalue { i8, i1 } %m, 0
%ov = extractvalue { i8, i1 } %m, 1
- store i1 %ov, i1* %p
+ store i1 %ov, ptr %p
ret i8 %r
}
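; A hedged worked check of the fold above, not from the original file:
; x * -1 is exactly 0 - x, and in i8 that negation is out of range only for
; x == -128 (since +128 is not representable), which matches the icmp eq.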
-define <4 x i8> @smul_neg1_vec(<4 x i8> %x, <4 x i1>* %p) {
+define <4 x i8> @smul_neg1_vec(<4 x i8> %x, ptr %p) {
; CHECK-LABEL: @smul_neg1_vec(
; CHECK-NEXT: [[R:%.*]] = sub <4 x i8> zeroinitializer, [[X:%.*]]
; CHECK-NEXT: [[OV:%.*]] = icmp eq <4 x i8> [[X]], <i8 -128, i8 -128, i8 -128, i8 -128>
-; CHECK-NEXT: store <4 x i1> [[OV]], <4 x i1>* [[P:%.*]], align 1
+; CHECK-NEXT: store <4 x i1> [[OV]], ptr [[P:%.*]], align 1
; CHECK-NEXT: ret <4 x i8> [[R]]
;
%m = tail call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> %x, <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>)
%r = extractvalue { <4 x i8>, <4 x i1> } %m, 0
%ov = extractvalue { <4 x i8>, <4 x i1> } %m, 1
- store <4 x i1> %ov, <4 x i1>* %p
+ store <4 x i1> %ov, ptr %p
ret <4 x i8> %r
}
-define <4 x i8> @smul_neg1_vec_poison(<4 x i8> %x, <4 x i1>* %p) {
+define <4 x i8> @smul_neg1_vec_poison(<4 x i8> %x, ptr %p) {
; CHECK-LABEL: @smul_neg1_vec_poison(
; CHECK-NEXT: [[R:%.*]] = sub <4 x i8> zeroinitializer, [[X:%.*]]
; CHECK-NEXT: [[OV:%.*]] = icmp eq <4 x i8> [[X]], <i8 -128, i8 -128, i8 -128, i8 -128>
-; CHECK-NEXT: store <4 x i1> [[OV]], <4 x i1>* [[P:%.*]], align 1
+; CHECK-NEXT: store <4 x i1> [[OV]], ptr [[P:%.*]], align 1
; CHECK-NEXT: ret <4 x i8> [[R]]
;
%m = tail call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> %x, <4 x i8> <i8 -1, i8 -1, i8 poison, i8 -1>)
%r = extractvalue { <4 x i8>, <4 x i1> } %m, 0
%ov = extractvalue { <4 x i8>, <4 x i1> } %m, 1
- store <4 x i1> %ov, <4 x i1>* %p
+ store <4 x i1> %ov, ptr %p
ret <4 x i8> %r
}
-define i8 @smul_neg2(i8 %x, i1* %p) {
+define i8 @smul_neg2(i8 %x, ptr %p) {
; CHECK-LABEL: @smul_neg2(
; CHECK-NEXT: [[M:%.*]] = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 [[X:%.*]], i8 -2)
; CHECK-NEXT: [[R:%.*]] = extractvalue { i8, i1 } [[M]], 0
; CHECK-NEXT: [[OV:%.*]] = extractvalue { i8, i1 } [[M]], 1
-; CHECK-NEXT: store i1 [[OV]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[OV]], ptr [[P:%.*]], align 1
; CHECK-NEXT: ret i8 [[R]]
;
%m = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %x, i8 -2)
%r = extractvalue { i8, i1 } %m, 0
%ov = extractvalue { i8, i1 } %m, 1
- store i1 %ov, i1* %p
+ store i1 %ov, ptr %p
ret i8 %r
}
-define i8 @umul_neg1(i8 %x, i1* %p) {
+define i8 @umul_neg1(i8 %x, ptr %p) {
; CHECK-LABEL: @umul_neg1(
; CHECK-NEXT: [[R:%.*]] = sub i8 0, [[X:%.*]]
; CHECK-NEXT: [[OV:%.*]] = icmp ugt i8 [[X]], 1
-; CHECK-NEXT: store i1 [[OV]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[OV]], ptr [[P:%.*]], align 1
; CHECK-NEXT: ret i8 [[R]]
;
%m = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %x, i8 -1)
%r = extractvalue { i8, i1 } %m, 0
%ov = extractvalue { i8, i1 } %m, 1
- store i1 %ov, i1* %p
+ store i1 %ov, ptr %p
ret i8 %r
}
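; A hedged worked check, not from the original file: unsigned x * 255 equals
; 256*x - x, which is -x modulo 256, and the true product exceeds 255 exactly
; when x >= 2 (255 * 1 = 255 still fits), matching the icmp ugt 1.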
-define <4 x i8> @umul_neg1_vec(<4 x i8> %x, <4 x i1>* %p) {
+define <4 x i8> @umul_neg1_vec(<4 x i8> %x, ptr %p) {
; CHECK-LABEL: @umul_neg1_vec(
; CHECK-NEXT: [[R:%.*]] = sub <4 x i8> zeroinitializer, [[X:%.*]]
; CHECK-NEXT: [[OV:%.*]] = icmp ugt <4 x i8> [[X]], <i8 1, i8 1, i8 1, i8 1>
-; CHECK-NEXT: store <4 x i1> [[OV]], <4 x i1>* [[P:%.*]], align 1
+; CHECK-NEXT: store <4 x i1> [[OV]], ptr [[P:%.*]], align 1
; CHECK-NEXT: ret <4 x i8> [[R]]
;
%m = tail call { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8> %x, <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>)
%r = extractvalue { <4 x i8>, <4 x i1> } %m, 0
%ov = extractvalue { <4 x i8>, <4 x i1> } %m, 1
- store <4 x i1> %ov, <4 x i1>* %p
+ store <4 x i1> %ov, ptr %p
ret <4 x i8> %r
}
-define <4 x i8> @umul_neg1_vec_poison(<4 x i8> %x, <4 x i1>* %p) {
+define <4 x i8> @umul_neg1_vec_poison(<4 x i8> %x, ptr %p) {
; CHECK-LABEL: @umul_neg1_vec_poison(
; CHECK-NEXT: [[R:%.*]] = sub <4 x i8> zeroinitializer, [[X:%.*]]
; CHECK-NEXT: [[OV:%.*]] = icmp ugt <4 x i8> [[X]], <i8 1, i8 1, i8 1, i8 1>
-; CHECK-NEXT: store <4 x i1> [[OV]], <4 x i1>* [[P:%.*]], align 1
+; CHECK-NEXT: store <4 x i1> [[OV]], ptr [[P:%.*]], align 1
; CHECK-NEXT: ret <4 x i8> [[R]]
;
%m = tail call { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8> %x, <4 x i8> <i8 poison, i8 -1, i8 -1, i8 poison>)
%r = extractvalue { <4 x i8>, <4 x i1> } %m, 0
%ov = extractvalue { <4 x i8>, <4 x i1> } %m, 1
- store <4 x i1> %ov, <4 x i1>* %p
+ store <4 x i1> %ov, ptr %p
ret <4 x i8> %r
}
ret i8 %r
}
-define i8 @umul_2(i8 %x, i1* %p) {
+define i8 @umul_2(i8 %x, ptr %p) {
; CHECK-LABEL: @umul_2(
; CHECK-NEXT: [[R:%.*]] = shl i8 [[X:%.*]], 1
; CHECK-NEXT: [[OV:%.*]] = icmp slt i8 [[X]], 0
-; CHECK-NEXT: store i1 [[OV]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[OV]], ptr [[P:%.*]], align 1
; CHECK-NEXT: ret i8 [[R]]
;
%m = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %x, i8 2)
%r = extractvalue { i8, i1 } %m, 0
%ov = extractvalue { i8, i1 } %m, 1
- store i1 %ov, i1* %p
+ store i1 %ov, ptr %p
ret i8 %r
}
-define i8 @umul_8(i8 %x, i1* %p) {
+define i8 @umul_8(i8 %x, ptr %p) {
; CHECK-LABEL: @umul_8(
; CHECK-NEXT: [[R:%.*]] = shl i8 [[X:%.*]], 3
; CHECK-NEXT: [[OV:%.*]] = icmp ugt i8 [[X]], 31
-; CHECK-NEXT: store i1 [[OV]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[OV]], ptr [[P:%.*]], align 1
; CHECK-NEXT: ret i8 [[R]]
;
%m = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %x, i8 8)
%r = extractvalue { i8, i1 } %m, 0
%ov = extractvalue { i8, i1 } %m, 1
- store i1 %ov, i1* %p
+ store i1 %ov, ptr %p
ret i8 %r
}
-define i8 @umul_64(i8 %x, i1* %p) {
+define i8 @umul_64(i8 %x, ptr %p) {
; CHECK-LABEL: @umul_64(
; CHECK-NEXT: [[R:%.*]] = shl i8 [[X:%.*]], 6
; CHECK-NEXT: [[OV:%.*]] = icmp ugt i8 [[X]], 3
-; CHECK-NEXT: store i1 [[OV]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[OV]], ptr [[P:%.*]], align 1
; CHECK-NEXT: ret i8 [[R]]
;
%m = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %x, i8 64)
%r = extractvalue { i8, i1 } %m, 0
%ov = extractvalue { i8, i1 } %m, 1
- store i1 %ov, i1* %p
+ store i1 %ov, ptr %p
ret i8 %r
}
-define i8 @umul_256(i8 %x, i1* %p) {
+define i8 @umul_256(i8 %x, ptr %p) {
; CHECK-LABEL: @umul_256(
-; CHECK-NEXT: store i1 false, i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 false, ptr [[P:%.*]], align 1
; CHECK-NEXT: ret i8 0
;
%m = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %x, i8 256)
%r = extractvalue { i8, i1 } %m, 0
%ov = extractvalue { i8, i1 } %m, 1
- store i1 %ov, i1* %p
+ store i1 %ov, ptr %p
ret i8 %r
}
-define <4 x i8> @umul_4_vec_poison(<4 x i8> %x, <4 x i1>* %p) {
+define <4 x i8> @umul_4_vec_poison(<4 x i8> %x, ptr %p) {
; CHECK-LABEL: @umul_4_vec_poison(
; CHECK-NEXT: [[R:%.*]] = shl <4 x i8> [[X:%.*]], <i8 2, i8 2, i8 2, i8 2>
; CHECK-NEXT: [[OV:%.*]] = icmp ugt <4 x i8> [[X]], <i8 63, i8 63, i8 63, i8 63>
-; CHECK-NEXT: store <4 x i1> [[OV]], <4 x i1>* [[P:%.*]], align 1
+; CHECK-NEXT: store <4 x i1> [[OV]], ptr [[P:%.*]], align 1
; CHECK-NEXT: ret <4 x i8> [[R]]
;
%m = tail call { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8> %x, <4 x i8> <i8 poison, i8 4, i8 4, i8 poison>)
%r = extractvalue { <4 x i8>, <4 x i1> } %m, 0
%ov = extractvalue { <4 x i8>, <4 x i1> } %m, 1
- store <4 x i1> %ov, <4 x i1>* %p
+ store <4 x i1> %ov, ptr %p
ret <4 x i8> %r
}
; Negative test: not PowerOf2
-define i8 @umul_3(i8 %x, i1* %p) {
+define i8 @umul_3(i8 %x, ptr %p) {
; CHECK-LABEL: @umul_3(
; CHECK-NEXT: [[M:%.*]] = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[X:%.*]], i8 3)
; CHECK-NEXT: [[R:%.*]] = extractvalue { i8, i1 } [[M]], 0
; CHECK-NEXT: [[OV:%.*]] = extractvalue { i8, i1 } [[M]], 1
-; CHECK-NEXT: store i1 [[OV]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[OV]], ptr [[P:%.*]], align 1
; CHECK-NEXT: ret i8 [[R]]
;
%m = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %x, i8 3)
%r = extractvalue { i8, i1 } %m, 0
%ov = extractvalue { i8, i1 } %m, 1
- store i1 %ov, i1* %p
+ store i1 %ov, ptr %p
ret i8 %r
}
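; Hand note on why 3 stays an intrinsic: the fold above relies on replacing the
; multiply with a single shl and bounding x against 255 >> k; a multiplier that
; is not a power of two has no such shift form, so the call and both
; extractvalues are kept.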
-define i8 @smul_4(i8 %x, i1* %p) {
+define i8 @smul_4(i8 %x, ptr %p) {
; CHECK-LABEL: @smul_4(
; CHECK-NEXT: [[R:%.*]] = shl i8 [[X:%.*]], 2
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X]], -32
; CHECK-NEXT: [[OV:%.*]] = icmp ult i8 [[TMP1]], -64
-; CHECK-NEXT: store i1 [[OV]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[OV]], ptr [[P:%.*]], align 1
; CHECK-NEXT: ret i8 [[R]]
;
%m = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %x, i8 4)
%r = extractvalue { i8, i1 } %m, 0
%ov = extractvalue { i8, i1 } %m, 1
- store i1 %ov, i1* %p
+ store i1 %ov, ptr %p
ret i8 %r
}
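; Worked form of the signed check above (hand-derived for @smul_4): 4 * x fits
; in i8 iff x is in [-32, 31]. The expected output encodes "x outside [-32, 31]"
; as an offset plus one unsigned compare:
;   %t  = add i8 %x, -32        ; maps [-32, 31] onto [192, 255] (unsigned)
;   %ov = icmp ult i8 %t, -64   ; -64 is 192 unsigned; true exactly outside the window
; @smul_16 and @smul_32 follow the same pattern with windows [-8, 7] and [-4, 3].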
-define i8 @smul_16(i8 %x, i1* %p) {
+define i8 @smul_16(i8 %x, ptr %p) {
; CHECK-LABEL: @smul_16(
; CHECK-NEXT: [[R:%.*]] = shl i8 [[X:%.*]], 4
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X]], -8
; CHECK-NEXT: [[OV:%.*]] = icmp ult i8 [[TMP1]], -16
-; CHECK-NEXT: store i1 [[OV]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[OV]], ptr [[P:%.*]], align 1
; CHECK-NEXT: ret i8 [[R]]
;
%m = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %x, i8 16)
%r = extractvalue { i8, i1 } %m, 0
%ov = extractvalue { i8, i1 } %m, 1
- store i1 %ov, i1* %p
+ store i1 %ov, ptr %p
ret i8 %r
}
-define i8 @smul_32(i8 %x, i1* %p) {
+define i8 @smul_32(i8 %x, ptr %p) {
; CHECK-LABEL: @smul_32(
; CHECK-NEXT: [[R:%.*]] = shl i8 [[X:%.*]], 5
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X]], -4
; CHECK-NEXT: [[OV:%.*]] = icmp ult i8 [[TMP1]], -8
-; CHECK-NEXT: store i1 [[OV]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[OV]], ptr [[P:%.*]], align 1
; CHECK-NEXT: ret i8 [[R]]
;
%m = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %x, i8 32)
%r = extractvalue { i8, i1 } %m, 0
%ov = extractvalue { i8, i1 } %m, 1
- store i1 %ov, i1* %p
+ store i1 %ov, ptr %p
ret i8 %r
}
-define i8 @smul_128(i8 %x, i1* %p) {
+define i8 @smul_128(i8 %x, ptr %p) {
; CHECK-LABEL: @smul_128(
; CHECK-NEXT: [[R:%.*]] = shl i8 [[X:%.*]], 7
; CHECK-NEXT: [[OV:%.*]] = icmp ugt i8 [[X]], 1
-; CHECK-NEXT: store i1 [[OV]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[OV]], ptr [[P:%.*]], align 1
; CHECK-NEXT: ret i8 [[R]]
;
%m = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %x, i8 128)
%r = extractvalue { i8, i1 } %m, 0
%ov = extractvalue { i8, i1 } %m, 1
- store i1 %ov, i1* %p
+ store i1 %ov, ptr %p
ret i8 %r
}
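; Hand note: i8 128 is -128 (the minimum signed value), and x * -128 is
; representable in i8 only for x = 0 or x = 1, so overflow is exactly
; icmp ugt i8 %x, 1 as checked above.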
-define <4 x i8> @smul_2_vec_poison(<4 x i8> %x, <4 x i1>* %p) {
+define <4 x i8> @smul_2_vec_poison(<4 x i8> %x, ptr %p) {
; CHECK-LABEL: @smul_2_vec_poison(
; CHECK-NEXT: [[R:%.*]] = shl <4 x i8> [[X:%.*]], <i8 1, i8 1, i8 1, i8 1>
; CHECK-NEXT: [[TMP1:%.*]] = add <4 x i8> [[X]], <i8 64, i8 64, i8 64, i8 64>
; CHECK-NEXT: [[OV:%.*]] = icmp slt <4 x i8> [[TMP1]], zeroinitializer
-; CHECK-NEXT: store <4 x i1> [[OV]], <4 x i1>* [[P:%.*]], align 1
+; CHECK-NEXT: store <4 x i1> [[OV]], ptr [[P:%.*]], align 1
; CHECK-NEXT: ret <4 x i8> [[R]]
;
%m = tail call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> %x, <4 x i8> <i8 poison, i8 2, i8 2, i8 poison>)
%r = extractvalue { <4 x i8>, <4 x i1> } %m, 0
%ov = extractvalue { <4 x i8>, <4 x i1> } %m, 1
- store <4 x i1> %ov, <4 x i1>* %p
+ store <4 x i1> %ov, ptr %p
ret <4 x i8> %r
}
; Negative test: not PowerOf2
-define i8 @smul_7(i8 %x, i1* %p) {
+define i8 @smul_7(i8 %x, ptr %p) {
; CHECK-LABEL: @smul_7(
; CHECK-NEXT: [[M:%.*]] = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 [[X:%.*]], i8 7)
; CHECK-NEXT: [[R:%.*]] = extractvalue { i8, i1 } [[M]], 0
; CHECK-NEXT: [[OV:%.*]] = extractvalue { i8, i1 } [[M]], 1
-; CHECK-NEXT: store i1 [[OV]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[OV]], ptr [[P:%.*]], align 1
; CHECK-NEXT: ret i8 [[R]]
;
%m = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %x, i8 7)
%r = extractvalue { i8, i1 } %m, 0
%ov = extractvalue { i8, i1 } %m, 1
- store i1 %ov, i1* %p
+ store i1 %ov, ptr %p
ret i8 %r
}
-define i1 @xor_icmp_ptr(i8* %c, i8* %d) {
+define i1 @xor_icmp_ptr(ptr %c, ptr %d) {
; CHECK-LABEL: @xor_icmp_ptr(
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8* [[C:%.*]], null
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i8* [[D:%.*]], null
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt ptr [[C:%.*]], null
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt ptr [[D:%.*]], null
; CHECK-NEXT: [[XOR:%.*]] = xor i1 [[CMP]], [[CMP1]]
; CHECK-NEXT: ret i1 [[XOR]]
;
- %cmp = icmp slt i8* %c, null
- %cmp1 = icmp slt i8* %d, null
+ %cmp = icmp slt ptr %c, null
+ %cmp1 = icmp slt ptr %d, null
%xor = xor i1 %cmp, %cmp1
ret i1 %xor
}
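; Hand note: each compare above is a sign test on the raw address bits, and the
; xor of two independent sign tests has no cheaper form, so all three
; instructions are expected to survive unchanged.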
; If an icmp in the xor has extra uses, we don't consider it, even though some cases are freely invertible.
; %cond0 is extra-used in select, which is freely invertible.
-define i1 @v0_select_of_consts(i32 %X, i32* %selected) {
+define i1 @v0_select_of_consts(i32 %X, ptr %selected) {
; CHECK-LABEL: @v0_select_of_consts(
; CHECK-NEXT: [[COND0_INV:%.*]] = icmp sgt i32 [[X:%.*]], 32767
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[COND0_INV]], i32 32767, i32 -32768
-; CHECK-NEXT: store i32 [[SELECT]], i32* [[SELECTED:%.*]], align 4
+; CHECK-NEXT: store i32 [[SELECT]], ptr [[SELECTED:%.*]], align 4
; CHECK-NEXT: [[X_OFF:%.*]] = add i32 [[X]], 32767
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[X_OFF]], 65535
; CHECK-NEXT: ret i1 [[TMP1]]
;
%cond0 = icmp sgt i32 %X, 32767
%cond1 = icmp sgt i32 %X, -32768
%select = select i1 %cond0, i32 32767, i32 -32768
- store i32 %select, i32* %selected
+ store i32 %select, ptr %selected
%res = xor i1 %cond0, %cond1
ret i1 %res
}
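; Worked form of the xor-of-icmps fold above (hand-derived): with
; cond0 = X s> 32767 and cond1 = X s> -32768, cond0 implies cond1, so the xor
; is true exactly for X in [-32767, 32767]. That range becomes one offset
; compare:
;   %off = add i32 %X, 32767
;   %res = icmp ult i32 %off, 65535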
-define i1 @v1_select_of_var_and_const(i32 %X, i32 %Y, i32* %selected) {
+define i1 @v1_select_of_var_and_const(i32 %X, i32 %Y, ptr %selected) {
; CHECK-LABEL: @v1_select_of_var_and_const(
; CHECK-NEXT: [[COND0:%.*]] = icmp slt i32 [[X:%.*]], 32768
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[COND0]], i32 -32768, i32 [[Y:%.*]]
-; CHECK-NEXT: store i32 [[SELECT]], i32* [[SELECTED:%.*]], align 4
+; CHECK-NEXT: store i32 [[SELECT]], ptr [[SELECTED:%.*]], align 4
; CHECK-NEXT: [[X_OFF:%.*]] = add i32 [[X]], 32767
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[X_OFF]], 65535
; CHECK-NEXT: ret i1 [[TMP1]]
;
%cond0 = icmp sgt i32 %X, 32767
%cond1 = icmp sgt i32 %X, -32768
%select = select i1 %cond0, i32 %Y, i32 -32768
- store i32 %select, i32* %selected
+ store i32 %select, ptr %selected
%res = xor i1 %cond0, %cond1
ret i1 %res
}
-define i1 @v2_select_of_const_and_var(i32 %X, i32 %Y, i32* %selected) {
+define i1 @v2_select_of_const_and_var(i32 %X, i32 %Y, ptr %selected) {
; CHECK-LABEL: @v2_select_of_const_and_var(
; CHECK-NEXT: [[COND0_INV:%.*]] = icmp sgt i32 [[X:%.*]], 32767
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[COND0_INV]], i32 32767, i32 [[Y:%.*]]
-; CHECK-NEXT: store i32 [[SELECT]], i32* [[SELECTED:%.*]], align 4
+; CHECK-NEXT: store i32 [[SELECT]], ptr [[SELECTED:%.*]], align 4
; CHECK-NEXT: [[X_OFF:%.*]] = add i32 [[X]], 32767
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[X_OFF]], 65535
; CHECK-NEXT: ret i1 [[TMP1]]
;
%cond0 = icmp sgt i32 %X, 32767
%cond1 = icmp sgt i32 %X, -32768
%select = select i1 %cond0, i32 32767, i32 %Y
- store i32 %select, i32* %selected
+ store i32 %select, ptr %selected
%res = xor i1 %cond0, %cond1
ret i1 %res
}
; Branch is also freely invertible
-define i1 @v3_branch(i32 %X, i32* %dst0, i32* %dst1) {
+define i1 @v3_branch(i32 %X, ptr %dst0, ptr %dst1) {
; CHECK-LABEL: @v3_branch(
; CHECK-NEXT: begin:
; CHECK-NEXT: [[COND0:%.*]] = icmp slt i32 [[X:%.*]], 32768
; CHECK-NEXT: br i1 [[COND0]], label [[BB1:%.*]], label [[BB0:%.*]]
; CHECK: bb0:
-; CHECK-NEXT: store i32 0, i32* [[DST0:%.*]], align 4
+; CHECK-NEXT: store i32 0, ptr [[DST0:%.*]], align 4
; CHECK-NEXT: br label [[END:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: store i32 0, i32* [[DST1:%.*]], align 4
+; CHECK-NEXT: store i32 0, ptr [[DST1:%.*]], align 4
; CHECK-NEXT: br label [[END]]
; CHECK: end:
; CHECK-NEXT: [[X_OFF:%.*]] = add i32 [[X]], 32767
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[X_OFF]], 65535
; CHECK-NEXT: ret i1 [[TMP1]]
;
begin:
%cond0 = icmp sgt i32 %X, 32767
%cond1 = icmp sgt i32 %X, -32768
br i1 %cond0, label %bb0, label %bb1
bb0:
- store i32 0, i32* %dst0
+ store i32 0, ptr %dst0
br label %end
bb1:
- store i32 0, i32* %dst1
+ store i32 0, ptr %dst1
br label %end
end:
%res = xor i1 %cond0, %cond1
ret i1 %res
}
; Can invert 'not'.
-define i1 @v4_not_store(i32 %X, i1* %not_cond) {
+define i1 @v4_not_store(i32 %X, ptr %not_cond) {
; CHECK-LABEL: @v4_not_store(
; CHECK-NEXT: [[COND0:%.*]] = icmp slt i32 [[X:%.*]], 32768
-; CHECK-NEXT: store i1 [[COND0]], i1* [[NOT_COND:%.*]], align 1
+; CHECK-NEXT: store i1 [[COND0]], ptr [[NOT_COND:%.*]], align 1
; CHECK-NEXT: [[X_OFF:%.*]] = add i32 [[X]], 32767
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[X_OFF]], 65535
; CHECK-NEXT: ret i1 [[TMP1]]
;
%cond0 = icmp sgt i32 %X, 32767
%not_cond0 = xor i1 %cond0, -1
- store i1 %not_cond0, i1* %not_cond
+ store i1 %not_cond0, ptr %not_cond
%cond1 = icmp sgt i32 %X, -32768
%select = select i1 %cond0, i32 32767, i32 -32768
%res = xor i1 %cond0, %cond1
ret i1 %res
}
; All extra uses are invertible.
-define i1 @v5_select_and_not(i32 %X, i32 %Y, i32* %selected, i1* %not_cond) {
+define i1 @v5_select_and_not(i32 %X, i32 %Y, ptr %selected, ptr %not_cond) {
; CHECK-LABEL: @v5_select_and_not(
; CHECK-NEXT: [[COND0:%.*]] = icmp slt i32 [[X:%.*]], 32768
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[COND0]], i32 [[Y:%.*]], i32 32767
-; CHECK-NEXT: store i1 [[COND0]], i1* [[NOT_COND:%.*]], align 1
-; CHECK-NEXT: store i32 [[SELECT]], i32* [[SELECTED:%.*]], align 4
+; CHECK-NEXT: store i1 [[COND0]], ptr [[NOT_COND:%.*]], align 1
+; CHECK-NEXT: store i32 [[SELECT]], ptr [[SELECTED:%.*]], align 4
; CHECK-NEXT: [[X_OFF:%.*]] = add i32 [[X]], 32767
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[X_OFF]], 65535
; CHECK-NEXT: ret i1 [[TMP1]]
;
%cond0 = icmp sgt i32 %X, 32767
%cond1 = icmp sgt i32 %X, -32768
%select = select i1 %cond0, i32 32767, i32 %Y
%not_cond0 = xor i1 %cond0, -1
- store i1 %not_cond0, i1* %not_cond
- store i32 %select, i32* %selected
+ store i1 %not_cond0, ptr %not_cond
+ store i32 %select, ptr %selected
%res = xor i1 %cond0, %cond1
ret i1 %res
}
; Not all extra uses are invertible.
-define i1 @n6_select_and_not(i32 %X, i32 %Y, i32* %selected, i1* %not_cond) {
+define i1 @n6_select_and_not(i32 %X, i32 %Y, ptr %selected, ptr %not_cond) {
; CHECK-LABEL: @n6_select_and_not(
; CHECK-NEXT: [[COND0:%.*]] = icmp sgt i32 [[X:%.*]], 32767
; CHECK-NEXT: [[COND1:%.*]] = icmp sgt i32 [[X]], -32768
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[COND0]], i32 32767, i32 [[Y:%.*]]
-; CHECK-NEXT: store i1 [[COND0]], i1* [[NOT_COND:%.*]], align 1
-; CHECK-NEXT: store i32 [[SELECT]], i32* [[SELECTED:%.*]], align 4
+; CHECK-NEXT: store i1 [[COND0]], ptr [[NOT_COND:%.*]], align 1
+; CHECK-NEXT: store i32 [[SELECT]], ptr [[SELECTED:%.*]], align 4
; CHECK-NEXT: [[RES:%.*]] = xor i1 [[COND0]], [[COND1]]
; CHECK-NEXT: ret i1 [[RES]]
;
%cond0 = icmp sgt i32 %X, 32767
%cond1 = icmp sgt i32 %X, -32768
%select = select i1 %cond0, i32 32767, i32 %Y
- store i1 %cond0, i1* %not_cond
- store i32 %select, i32* %selected
+ store i1 %cond0, ptr %not_cond
+ store i32 %select, ptr %selected
%res = xor i1 %cond0, %cond1
ret i1 %res
}
; Not freely invertible, would require extra 'not' instruction.
-define i1 @n7_store(i32 %X, i1* %cond) {
+define i1 @n7_store(i32 %X, ptr %cond) {
; CHECK-LABEL: @n7_store(
; CHECK-NEXT: [[COND0:%.*]] = icmp sgt i32 [[X:%.*]], 32767
-; CHECK-NEXT: store i1 [[COND0]], i1* [[COND:%.*]], align 1
+; CHECK-NEXT: store i1 [[COND0]], ptr [[COND:%.*]], align 1
; CHECK-NEXT: [[COND1:%.*]] = icmp sgt i32 [[X]], -32768
; CHECK-NEXT: [[RES:%.*]] = xor i1 [[COND0]], [[COND1]]
; CHECK-NEXT: ret i1 [[RES]]
;
%cond0 = icmp sgt i32 %X, 32767
- store i1 %cond0, i1* %cond
+ store i1 %cond0, ptr %cond
%cond1 = icmp sgt i32 %X, -32768
%select = select i1 %cond0, i32 32767, i32 -32768
%res = xor i1 %cond0, %cond1
ret i1 %res
}
define i4 @t11(i4 %x) {
; CHECK-LABEL: @t11(
; CHECK-NEXT: [[I0:%.*]] = or i4 [[X:%.*]], -4
-; CHECK-NEXT: [[I1:%.*]] = xor i4 [[I0]], ptrtoint (i32* @G2 to i4)
+; CHECK-NEXT: [[I1:%.*]] = xor i4 [[I0]], ptrtoint (ptr @G2 to i4)
; CHECK-NEXT: ret i4 [[I1]]
;
%i0 = or i4 %x, 12
- %i1 = xor i4 %i0, ptrtoint (i32* @G2 to i4)
+ %i1 = xor i4 %i0, ptrtoint (ptr @G2 to i4)
ret i4 %i1
}
define i4 @t12(i4 %x) {
; CHECK-LABEL: @t12(
-; CHECK-NEXT: [[I0:%.*]] = or i4 [[X:%.*]], ptrtoint (i32* @G to i4)
+; CHECK-NEXT: [[I0:%.*]] = or i4 [[X:%.*]], ptrtoint (ptr @G to i4)
; CHECK-NEXT: [[I1:%.*]] = xor i4 [[I0]], -6
; CHECK-NEXT: ret i4 [[I1]]
;
- %i0 = or i4 %x, ptrtoint (i32* @G to i4)
+ %i0 = or i4 %x, ptrtoint (ptr @G to i4)
%i1 = xor i4 %i0, 10
ret i4 %i1
}
define i4 @t13(i4 %x) {
; CHECK-LABEL: @t13(
-; CHECK-NEXT: [[I0:%.*]] = or i4 [[X:%.*]], ptrtoint (i32* @G to i4)
-; CHECK-NEXT: [[I1:%.*]] = xor i4 [[I0]], ptrtoint (i32* @G2 to i4)
+; CHECK-NEXT: [[I0:%.*]] = or i4 [[X:%.*]], ptrtoint (ptr @G to i4)
+; CHECK-NEXT: [[I1:%.*]] = xor i4 [[I0]], ptrtoint (ptr @G2 to i4)
; CHECK-NEXT: ret i4 [[I1]]
;
- %i0 = or i4 %x, ptrtoint (i32* @G to i4)
- %i1 = xor i4 %i0, ptrtoint (i32* @G2 to i4)
+ %i0 = or i4 %x, ptrtoint (ptr @G to i4)
+ %i1 = xor i4 %i0, ptrtoint (ptr @G2 to i4)
ret i4 %i1
}
define void @test20(i32 %A, i32 %B) {
; CHECK-LABEL: @test20(
-; CHECK-NEXT: store i32 [[B:%.*]], i32* @G1, align 4
-; CHECK-NEXT: store i32 [[A:%.*]], i32* @G2, align 4
+; CHECK-NEXT: store i32 [[B:%.*]], ptr @G1, align 4
+; CHECK-NEXT: store i32 [[A:%.*]], ptr @G2, align 4
; CHECK-NEXT: ret void
;
%t2 = xor i32 %B, %A
%t5 = xor i32 %t2, %B
%t8 = xor i32 %t5, %t2
- store i32 %t8, i32* @G1
- store i32 %t5, i32* @G2
+ store i32 %t8, ptr @G1
+ store i32 %t5, ptr @G2
ret void
}
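; The xor chain above folds by cancellation (plain algebra, noted by hand):
;   t2 = B ^ A
;   t5 = t2 ^ B = A
;   t8 = t5 ^ t2 = A ^ (B ^ A) = B
; hence the CHECK lines store B to @G1 and A to @G2 with no xors left.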
-define i32 @or_xor_extra_use(i32 %a, i32 %b, i32* %p) {
+define i32 @or_xor_extra_use(i32 %a, i32 %b, ptr %p) {
; CHECK-LABEL: @or_xor_extra_use(
; CHECK-NEXT: [[O:%.*]] = or i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: store i32 [[O]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[O]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = xor i32 [[O]], [[B]]
; CHECK-NEXT: ret i32 [[R]]
;
%o = or i32 %a, %b
- store i32 %o, i32* %p
+ store i32 %o, ptr %p
%r = xor i32 %b, %o
ret i32 %r
}
-define i32 @and_xor_extra_use(i32 %a, i32 %b, i32* %p) {
+define i32 @and_xor_extra_use(i32 %a, i32 %b, ptr %p) {
; CHECK-LABEL: @and_xor_extra_use(
; CHECK-NEXT: [[O:%.*]] = and i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: store i32 [[O]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[O]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = xor i32 [[O]], [[B]]
; CHECK-NEXT: ret i32 [[R]]
;
%o = and i32 %a, %b
- store i32 %o, i32* %p
+ store i32 %o, ptr %p
%r = xor i32 %b, %o
ret i32 %r
}
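; Hand note for the two tests above: the single-use folds
;   (a | b) ^ b --> a & ~b
;   (a & b) ^ b --> ~a & b
; are not applied because %o has a second user (the store); the rewrite would
; leave %o alive and only add instructions, so only the xor's operand order is
; canonicalized.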
; The extra use (store) is here because the simpler case
; may be transformed using demanded bits.
-define i8 @xor_or_not(i8 %x, i8* %p) {
+define i8 @xor_or_not(i8 %x, ptr %p) {
; CHECK-LABEL: @xor_or_not(
; CHECK-NEXT: [[NX:%.*]] = xor i8 [[X:%.*]], -1
-; CHECK-NEXT: store i8 [[NX]], i8* [[P:%.*]], align 1
+; CHECK-NEXT: store i8 [[NX]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X]], -8
; CHECK-NEXT: [[R:%.*]] = xor i8 [[TMP1]], -13
; CHECK-NEXT: ret i8 [[R]]
;
%nx = xor i8 %x, -1
- store i8 %nx, i8* %p
+ store i8 %nx, ptr %p
%or = or i8 %nx, 7
%r = xor i8 %or, 12
ret i8 %r
}
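; Derivation of the folded form above (hand-written, De Morgan plus xor of
; constants): ~x | 7 = ~(x & -8), and ~(x & -8) ^ 12 = (x & -8) ^ ~12 = (x & -8) ^ -13.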
; Don't do this if the 'or' has extra uses.
-define i8 @xor_or_not_uses(i8 %x, i8* %p) {
+define i8 @xor_or_not_uses(i8 %x, ptr %p) {
; CHECK-LABEL: @xor_or_not_uses(
; CHECK-NEXT: [[NX:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT: [[OR:%.*]] = or i8 [[NX]], 7
-; CHECK-NEXT: store i8 [[OR]], i8* [[P:%.*]], align 1
+; CHECK-NEXT: store i8 [[OR]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[R:%.*]] = xor i8 [[OR]], 12
; CHECK-NEXT: ret i8 [[R]]
;
%nx = xor i8 %x, -1
%or = or i8 %nx, 7
- store i8 %or, i8* %p
+ store i8 %or, ptr %p
%r = xor i8 %or, 12
ret i8 %r
}
; The extra use (store) is here because the simpler case
; may be transformed using demanded bits.
-define i8 @xor_and_not(i8 %x, i8* %p) {
+define i8 @xor_and_not(i8 %x, ptr %p) {
; CHECK-LABEL: @xor_and_not(
; CHECK-NEXT: [[NX:%.*]] = xor i8 [[X:%.*]], -1
-; CHECK-NEXT: store i8 [[NX]], i8* [[P:%.*]], align 1
+; CHECK-NEXT: store i8 [[NX]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X]], 42
; CHECK-NEXT: [[R:%.*]] = xor i8 [[TMP1]], 53
; CHECK-NEXT: ret i8 [[R]]
;
%nx = xor i8 %x, -1
- store i8 %nx, i8* %p
+ store i8 %nx, ptr %p
%and = and i8 %nx, 42
%r = xor i8 %and, 31
ret i8 %r
}
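; Derivation (hand-written): ~x & 42 = (x & 42) ^ 42, so
; ((x & 42) ^ 42) ^ 31 = (x & 42) ^ (42 ^ 31) = (x & 42) ^ 53.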
; Don't do this if the 'and' has extra uses.
-define i8 @xor_and_not_uses(i8 %x, i8* %p) {
+define i8 @xor_and_not_uses(i8 %x, ptr %p) {
; CHECK-LABEL: @xor_and_not_uses(
; CHECK-NEXT: [[NX:%.*]] = and i8 [[X:%.*]], 42
; CHECK-NEXT: [[AND:%.*]] = xor i8 [[NX]], 42
-; CHECK-NEXT: store i8 [[AND]], i8* [[P:%.*]], align 1
+; CHECK-NEXT: store i8 [[AND]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[R:%.*]] = xor i8 [[NX]], 53
; CHECK-NEXT: ret i8 [[R]]
;
%nx = xor i8 %x, -1
%and = and i8 %nx, 42
- store i8 %and, i8* %p
+ store i8 %and, ptr %p
%r = xor i8 %and, 31
ret i8 %r
}
-define i4 @or_or_xor_use1(i4 %x, i4 %y, i4 %z, i4* %p) {
+define i4 @or_or_xor_use1(i4 %x, i4 %y, i4 %z, ptr %p) {
; CHECK-LABEL: @or_or_xor_use1(
; CHECK-NEXT: [[O1:%.*]] = or i4 [[Z:%.*]], [[X:%.*]]
-; CHECK-NEXT: store i4 [[O1]], i4* [[P:%.*]], align 1
+; CHECK-NEXT: store i4 [[O1]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[O2:%.*]] = or i4 [[Z]], [[Y:%.*]]
; CHECK-NEXT: [[R:%.*]] = xor i4 [[O1]], [[O2]]
; CHECK-NEXT: ret i4 [[R]]
;
%o1 = or i4 %z, %x
- store i4 %o1, i4* %p
+ store i4 %o1, ptr %p
%o2 = or i4 %z, %y
%r = xor i4 %o1, %o2
ret i4 %r
}
-define i4 @or_or_xor_use2(i4 %x, i4 %y, i4 %z, i4* %p) {
+define i4 @or_or_xor_use2(i4 %x, i4 %y, i4 %z, ptr %p) {
; CHECK-LABEL: @or_or_xor_use2(
; CHECK-NEXT: [[O1:%.*]] = or i4 [[Z:%.*]], [[X:%.*]]
; CHECK-NEXT: [[O2:%.*]] = or i4 [[Z]], [[Y:%.*]]
-; CHECK-NEXT: store i4 [[O2]], i4* [[P:%.*]], align 1
+; CHECK-NEXT: store i4 [[O2]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[R:%.*]] = xor i4 [[O1]], [[O2]]
; CHECK-NEXT: ret i4 [[R]]
;
%o1 = or i4 %z, %x
%o2 = or i4 %z, %y
- store i4 %o2, i4* %p
+ store i4 %o2, ptr %p
%r = xor i4 %o1, %o2
ret i4 %r
}
; (B | ~A) ^ A --> ~(A & B)
-define i32 @xor_orn_commute2(i32 %a, i32 %pb,i32* %s) {
+define i32 @xor_orn_commute2(i32 %a, i32 %pb,ptr %s) {
; CHECK-LABEL: @xor_orn_commute2(
; CHECK-NEXT: [[B:%.*]] = udiv i32 42, [[PB:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[B]], [[A:%.*]]
; CHECK-NEXT: [[Z:%.*]] = xor i32 [[TMP1]], -1
; CHECK-NEXT: ret i32 [[Z]]
;
%b = udiv i32 42, %pb
%nota = xor i32 %a, -1
%l = or i32 %b, %nota
%z = xor i32 %l, %a
ret i32 %z
}
-define i32 @xor_orn_commute2_1use(i32 %a, i32 %pb,i32* %s) {
+define i32 @xor_orn_commute2_1use(i32 %a, i32 %pb,ptr %s) {
; CHECK-LABEL: @xor_orn_commute2_1use(
; CHECK-NEXT: [[B:%.*]] = udiv i32 42, [[PB:%.*]]
; CHECK-NEXT: [[NOTA:%.*]] = xor i32 [[A:%.*]], -1
-; CHECK-NEXT: store i32 [[NOTA]], i32* [[S:%.*]], align 4
+; CHECK-NEXT: store i32 [[NOTA]], ptr [[S:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[B]], [[A]]
; CHECK-NEXT: [[Z:%.*]] = xor i32 [[TMP1]], -1
; CHECK-NEXT: ret i32 [[Z]]
;
%b = udiv i32 42, %pb
%nota = xor i32 %a, -1
%l = or i32 %b, %nota
- store i32 %nota, i32* %s
+ store i32 %nota, ptr %s
%z = xor i32 %l, %a
ret i32 %z
}
; A ^ (B | ~A) --> ~(A & B)
-define i67 @xor_orn_commute3(i67 %pa, i67 %pb, i67* %s) {
+define i67 @xor_orn_commute3(i67 %pa, i67 %pb, ptr %s) {
; CHECK-LABEL: @xor_orn_commute3(
; CHECK-NEXT: [[A:%.*]] = udiv i67 42, [[PA:%.*]]
; CHECK-NEXT: [[B:%.*]] = udiv i67 42, [[PB:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = and i67 [[B]], [[A]]
; CHECK-NEXT: [[Z:%.*]] = xor i67 [[TMP1]], -1
; CHECK-NEXT: ret i67 [[Z]]
;
%a = udiv i67 42, %pa
%b = udiv i67 42, %pb
%nota = xor i67 %a, -1
%l = or i67 %b, %nota
%z = xor i67 %a, %l
ret i67 %z
}
-define i67 @xor_orn_commute3_1use(i67 %pa, i67 %pb, i67* %s) {
+define i67 @xor_orn_commute3_1use(i67 %pa, i67 %pb, ptr %s) {
; CHECK-LABEL: @xor_orn_commute3_1use(
; CHECK-NEXT: [[A:%.*]] = udiv i67 42, [[PA:%.*]]
; CHECK-NEXT: [[B:%.*]] = udiv i67 42, [[PB:%.*]]
; CHECK-NEXT: [[NOTA:%.*]] = xor i67 [[A]], -1
; CHECK-NEXT: [[L:%.*]] = or i67 [[B]], [[NOTA]]
-; CHECK-NEXT: store i67 [[L]], i67* [[S:%.*]], align 4
+; CHECK-NEXT: store i67 [[L]], ptr [[S:%.*]], align 4
; CHECK-NEXT: [[Z:%.*]] = xor i67 [[A]], [[L]]
; CHECK-NEXT: ret i67 [[Z]]
;
%a = udiv i67 42, %pa
%b = udiv i67 42, %pb
%nota = xor i67 %a, -1
%l = or i67 %b, %nota
- store i67 %l, i67* %s
+ store i67 %l, ptr %s
%z = xor i67 %a, %l
ret i67 %z
}
-define i32 @xor_orn_2use(i32 %a, i32 %b, i32* %s1, i32* %s2) {
+define i32 @xor_orn_2use(i32 %a, i32 %b, ptr %s1, ptr %s2) {
; CHECK-LABEL: @xor_orn_2use(
; CHECK-NEXT: [[NOTA:%.*]] = xor i32 [[A:%.*]], -1
-; CHECK-NEXT: store i32 [[NOTA]], i32* [[S1:%.*]], align 4
+; CHECK-NEXT: store i32 [[NOTA]], ptr [[S1:%.*]], align 4
; CHECK-NEXT: [[L:%.*]] = or i32 [[NOTA]], [[B:%.*]]
-; CHECK-NEXT: store i32 [[L]], i32* [[S2:%.*]], align 4
+; CHECK-NEXT: store i32 [[L]], ptr [[S2:%.*]], align 4
; CHECK-NEXT: [[Z:%.*]] = xor i32 [[L]], [[A]]
; CHECK-NEXT: ret i32 [[Z]]
;
%nota = xor i32 %a, -1
- store i32 %nota, i32* %s1
+ store i32 %nota, ptr %s1
%l = or i32 %nota, %b
- store i32 %l, i32* %s2
+ store i32 %l, ptr %s2
%z = xor i32 %l, %a
ret i32 %z
}
-define i32 @xor_or_xor_common_op_extra_use1(i32 %a, i32 %b, i32 %c, i32* %p) {
+define i32 @xor_or_xor_common_op_extra_use1(i32 %a, i32 %b, i32 %c, ptr %p) {
; CHECK-LABEL: @xor_or_xor_common_op_extra_use1(
; CHECK-NEXT: [[AC:%.*]] = xor i32 [[A:%.*]], [[C:%.*]]
-; CHECK-NEXT: store i32 [[AC]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[AC]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[AB:%.*]] = or i32 [[A]], [[B:%.*]]
; CHECK-NEXT: [[R:%.*]] = xor i32 [[AC]], [[AB]]
; CHECK-NEXT: ret i32 [[R]]
;
%ac = xor i32 %a, %c
- store i32 %ac, i32* %p
+ store i32 %ac, ptr %p
%ab = or i32 %a, %b
%r = xor i32 %ac, %ab
ret i32 %r
}
-define i32 @xor_or_xor_common_op_extra_use2(i32 %a, i32 %b, i32 %c, i32* %p) {
+define i32 @xor_or_xor_common_op_extra_use2(i32 %a, i32 %b, i32 %c, ptr %p) {
; CHECK-LABEL: @xor_or_xor_common_op_extra_use2(
; CHECK-NEXT: [[AC:%.*]] = xor i32 [[A:%.*]], [[C:%.*]]
; CHECK-NEXT: [[AB:%.*]] = or i32 [[A]], [[B:%.*]]
-; CHECK-NEXT: store i32 [[AB]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: store i32 [[AB]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = xor i32 [[AC]], [[AB]]
; CHECK-NEXT: ret i32 [[R]]
;
%ac = xor i32 %a, %c
%ab = or i32 %a, %b
- store i32 %ab, i32* %p
+ store i32 %ab, ptr %p
%r = xor i32 %ac, %ab
ret i32 %r
}
-define i32 @xor_or_xor_common_op_extra_use3(i32 %a, i32 %b, i32 %c, i32* %p1, i32* %p2) {
+define i32 @xor_or_xor_common_op_extra_use3(i32 %a, i32 %b, i32 %c, ptr %p1, ptr %p2) {
; CHECK-LABEL: @xor_or_xor_common_op_extra_use3(
; CHECK-NEXT: [[AC:%.*]] = xor i32 [[A:%.*]], [[C:%.*]]
-; CHECK-NEXT: store i32 [[AC]], i32* [[P1:%.*]], align 4
+; CHECK-NEXT: store i32 [[AC]], ptr [[P1:%.*]], align 4
; CHECK-NEXT: [[AB:%.*]] = or i32 [[A]], [[B:%.*]]
-; CHECK-NEXT: store i32 [[AB]], i32* [[P2:%.*]], align 4
+; CHECK-NEXT: store i32 [[AB]], ptr [[P2:%.*]], align 4
; CHECK-NEXT: [[R:%.*]] = xor i32 [[AC]], [[AB]]
; CHECK-NEXT: ret i32 [[R]]
;
%ac = xor i32 %a, %c
- store i32 %ac, i32* %p1
+ store i32 %ac, ptr %p1
%ab = or i32 %a, %b
- store i32 %ab, i32* %p2
+ store i32 %ab, ptr %p2
%r = xor i32 %ac, %ab
ret i32 %r
}
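; Hand note: with both %ac and %ab stored, neither intermediate can be erased,
; so a common-operand rewrite of the final xor (expressing it via %b, %c and
; the complement of %a) would only add instructions; the IR is left as-is in
; all three *_extra_use tests above.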
; This would infinite loop because knownbits changed between checking
; if a transform was profitable and actually doing the transform.
-define i1 @PR51762(i32 *%i, i32 %t0, i16 %t1, i64* %p, i32* %d, i32* %f, i32 %p2, i1 %c1) {
+define i1 @PR51762(ptr %i, i32 %t0, i16 %t1, ptr %p, ptr %d, ptr %f, i32 %p2, i1 %c1) {
; CHECK-LABEL: @PR51762(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK-NEXT: br label [[FOR_COND]]
; CHECK: for.end11:
; CHECK-NEXT: [[S1:%.*]] = sext i16 [[T1:%.*]] to i64
-; CHECK-NEXT: [[SROA38:%.*]] = load i32, i32* [[I:%.*]], align 8
+; CHECK-NEXT: [[SROA38:%.*]] = load i32, ptr [[I:%.*]], align 8
; CHECK-NEXT: [[INSERT_EXT51:%.*]] = zext i32 [[I_SROA_8_0]] to i64
; CHECK-NEXT: [[INSERT_SHIFT52:%.*]] = shl nuw i64 [[INSERT_EXT51]], 32
; CHECK-NEXT: [[INSERT_EXT39:%.*]] = zext i32 [[SROA38]] to i64
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[INSERT_INSERT41]], 0
; CHECK-NEXT: [[SPEC_SELECT57:%.*]] = or i1 [[NE]], [[CMP]]
; CHECK-NEXT: [[LOR_EXT:%.*]] = zext i1 [[SPEC_SELECT57]] to i32
-; CHECK-NEXT: [[T2:%.*]] = load i32, i32* [[D:%.*]], align 4
+; CHECK-NEXT: [[T2:%.*]] = load i32, ptr [[D:%.*]], align 4
; CHECK-NEXT: [[CONV15:%.*]] = sext i16 [[T1]] to i32
; CHECK-NEXT: [[CMP16:%.*]] = icmp sge i32 [[T2]], [[CONV15]]
; CHECK-NEXT: [[CONV17:%.*]] = zext i1 [[CMP16]] to i32
-; CHECK-NEXT: [[T3:%.*]] = load i32, i32* [[F:%.*]], align 4
+; CHECK-NEXT: [[T3:%.*]] = load i32, ptr [[F:%.*]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[T3]], [[CONV17]]
-; CHECK-NEXT: store i32 [[ADD]], i32* [[F]], align 4
+; CHECK-NEXT: store i32 [[ADD]], ptr [[F]], align 4
; CHECK-NEXT: [[REM18:%.*]] = srem i32 [[LOR_EXT]], [[ADD]]
; CHECK-NEXT: [[CONV19:%.*]] = zext i32 [[REM18]] to i64
-; CHECK-NEXT: store i32 0, i32* [[D]], align 8
+; CHECK-NEXT: store i32 0, ptr [[D]], align 8
; CHECK-NEXT: [[R:%.*]] = icmp ult i64 [[INSERT_INSERT41]], [[CONV19]]
; CHECK-NEXT: call void @llvm.assume(i1 [[R]])
; CHECK-NEXT: ret i1 true
for.end11:
%s1 = sext i16 %t1 to i64
- %sroa38 = load i32, i32* %i, align 8
+ %sroa38 = load i32, ptr %i, align 8
%insert.ext51 = zext i32 %i.sroa.8.0 to i64
%insert.shift52 = shl nuw i64 %insert.ext51, 32
%insert.ext39 = zext i32 %sroa38 to i64
%spec.select57 = or i1 %ne, %cmp
%lor.ext = zext i1 %spec.select57 to i32
- %t2 = load i32, i32* %d, align 4
+ %t2 = load i32, ptr %d, align 4
%conv15 = sext i16 %t1 to i32
%cmp16 = icmp sge i32 %t2, %conv15
%conv17 = zext i1 %cmp16 to i32
- %t3 = load i32, i32* %f, align 4
+ %t3 = load i32, ptr %f, align 4
%add = add nsw i32 %t3, %conv17
- store i32 %add, i32* %f, align 4
+ store i32 %add, ptr %f, align 4
%rem18 = srem i32 %lor.ext, %add
%conv19 = zext i32 %rem18 to i64
%div = udiv i64 %insert.insert41, %conv19
%trunc33 = trunc i64 %div to i32
- store i32 %trunc33, i32* %d, align 8
+ store i32 %trunc33, ptr %d, align 8
%r = icmp ult i64 %insert.insert41, %conv19
call void @llvm.assume(i1 %r)
ret i1 %r