; This test checks that we do not outline calls to different intrinsics as if
; they were the same function, nor treat the callee as an input value, as we
; would for calls to non-intrinsic functions.
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1)
-declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
; @function1 and @function2 below are instruction-for-instruction similar
; except that this one calls llvm.memcpy where the other calls llvm.memmove.
-define i8 @function1(i8* noalias %s, i8* noalias %d, i64 %len) {
+define i8 @function1(ptr noalias %s, ptr noalias %d, i64 %len) {
entry:
- %a = load i8, i8* %s
- %b = load i8, i8* %d
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %len, i1 false)
+ %a = load i8, ptr %s
+ %b = load i8, ptr %d
; NOTE(review): the differing intrinsic IDs (memcpy here vs. memmove in
; @function2) are what must keep these calls out of a shared outlined body.
+ call void @llvm.memcpy.p0.p0.i64(ptr %d, ptr %s, i64 %len, i1 false)
%c = add i8 %a, %b
- %ret = load i8, i8* %s
+ %ret = load i8, ptr %s
ret i8 %ret
}
; Same instruction sequence as @function1 above, but with llvm.memmove in
; place of llvm.memcpy; the regions around the call are outlined, the
; intrinsic call itself is not.
-define i8 @function2(i8* noalias %s, i8* noalias %d, i64 %len) {
+define i8 @function2(ptr noalias %s, ptr noalias %d, i64 %len) {
entry:
- %a = load i8, i8* %s
- %b = load i8, i8* %d
- call void @llvm.memmove.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %len, i1 false)
+ %a = load i8, ptr %s
+ %b = load i8, ptr %d
; memmove, not memcpy — intentionally different from @function1.
+ call void @llvm.memmove.p0.p0.i64(ptr %d, ptr %s, i64 %len, i1 false)
%c = add i8 %a, %b
- %ret = load i8, i8* %s
+ %ret = load i8, ptr %s
ret i8 %ret
}
; CHECK-LABEL: @function1(
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[B_LOC]])
-; CHECK-NEXT: call void @outlined_ir_func_1(i8* [[S:%.*]], i8* [[D:%.*]], i8* [[A_LOC]], i8* [[B_LOC]])
-; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, i8* [[A_LOC]], align 1
-; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, i8* [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[B_LOC]])
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[D]], i8* [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[RET_LOC]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], i8* [[S]], i8* [[RET_LOC]])
-; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, i8* [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
+; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
+; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
+; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[B_LOC]])
-; CHECK-NEXT: call void @outlined_ir_func_1(i8* [[S:%.*]], i8* [[D:%.*]], i8* [[A_LOC]], i8* [[B_LOC]])
-; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, i8* [[A_LOC]], align 1
-; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, i8* [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[B_LOC]])
-; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* [[D]], i8* [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[RET_LOC]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], i8* [[S]], i8* [[RET_LOC]])
-; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, i8* [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
+; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
+; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
+; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
; CHECK-NEXT: [[C:%.*]] = add i8 [[TMP0:%.*]], [[TMP1:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = load i8, i8* [[TMP2:%.*]], align 1
+; CHECK-NEXT: [[RET:%.*]] = load i8, ptr [[TMP2:%.*]], align 1
; CHECK-NEXT: br label [[ENTRY_AFTER_OUTLINE_EXITSTUB:%.*]]
; CHECK: entry_after_outline.exitStub:
-; CHECK-NEXT: store i8 [[RET]], i8* [[TMP3:%.*]], align 1
+; CHECK-NEXT: store i8 [[RET]], ptr [[TMP3:%.*]], align 1
; CHECK-NEXT: ret void
;
;
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
-; CHECK-NEXT: [[A:%.*]] = load i8, i8* [[TMP0:%.*]], align 1
-; CHECK-NEXT: [[B:%.*]] = load i8, i8* [[TMP1:%.*]], align 1
+; CHECK-NEXT: [[A:%.*]] = load i8, ptr [[TMP0:%.*]], align 1
+; CHECK-NEXT: [[B:%.*]] = load i8, ptr [[TMP1:%.*]], align 1
; CHECK-NEXT: br label [[ENTRY_AFTER_OUTLINE_EXITSTUB:%.*]]
; CHECK: entry_after_outline.exitStub:
-; CHECK-NEXT: store i8 [[A]], i8* [[TMP2:%.*]], align 1
-; CHECK-NEXT: store i8 [[B]], i8* [[TMP3:%.*]], align 1
+; CHECK-NEXT: store i8 [[A]], ptr [[TMP2:%.*]], align 1
+; CHECK-NEXT: store i8 [[B]], ptr [[TMP3:%.*]], align 1
; CHECK-NEXT: ret void
;
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_1(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_1(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[ADD_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[DOTLOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[OUTPUT]], i32* [[ADD_LOC]], i32* [[DOTLOC]])
-; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, i32* [[ADD_LOC]], align 4
-; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, i32* [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[OUTPUT]], align 4
-; CHECK-NEXT: call void @outlined_ir_func_2(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], i32* [[RESULT]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
+; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
+; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[OUTPUT]], align 4
+; CHECK-NEXT: call void @outlined_ir_func_2(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
;
entry:
%b = alloca i32, align 4
%output = alloca i32, align 4
%result = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- %0 = load i32, i32* %a, align 4
- %1 = load i32, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ %0 = load i32, ptr %a, align 4
+ %1 = load i32, ptr %b, align 4
%add = add i32 %0, %1
- store i32 %add, i32* %output, align 4
- %2 = load i32, i32* %output, align 4
- %3 = load i32, i32* %output, align 4
+ store i32 %add, ptr %output, align 4
+ %2 = load i32, ptr %output, align 4
+ %3 = load i32, ptr %output, align 4
%mul = mul i32 %2, %add
- store i32 %mul, i32* %result, align 4
+ store i32 %mul, ptr %result, align 4
ret void
}
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[ADD_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[DOTLOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[OUTPUT]], i32* [[ADD_LOC]], i32* [[DOTLOC]])
-; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, i32* [[ADD_LOC]], align 4
-; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, i32* [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: call void @outlined_ir_func_2(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], i32* [[RESULT]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
+; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
+; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @outlined_ir_func_2(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
;
entry:
%b = alloca i32, align 4
%output = alloca i32, align 4
%result = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- %0 = load i32, i32* %a, align 4
- %1 = load i32, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ %0 = load i32, ptr %a, align 4
+ %1 = load i32, ptr %b, align 4
%add = add i32 %0, %1
- store i32 %add, i32* %output, align 4
- %2 = load i32, i32* %output, align 4
+ store i32 %add, ptr %output, align 4
+ %2 = load i32, ptr %output, align 4
%mul = mul i32 %2, %add
- store i32 %mul, i32* %result, align 4
+ store i32 %mul, ptr %result, align 4
ret void
}
; Show that we do differentiate between outputs of the region stored in PHINodes
; versus those stored outside of PHINodes.
-define void @function1(i32* %a, i32* %b) {
+define void @function1(ptr %a, ptr %b) {
entry:
%0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
br label %test1
test1:
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
br i1 true, label %first, label %test
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
br i1 true, label %first, label %next
first:
; The region outputs %c/%e feed a PHI node here; the otherwise-identical
; @function2 has no such PHI user, so the outliner must keep the two
; output-handling schemes distinct.
%1 = phi i32 [ %c, %test ], [ %e, %test1 ]
ret void
}
-define void @function2(i32* %a, i32* %b) {
+define void @function2(ptr %a, ptr %b) {
entry:
%0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
br label %test1
test1:
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
br i1 true, label %first, label %test
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
br i1 true, label %first, label %next
first:
ret void
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[DOTCE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(i32* [[TMP0]], i32* [[DOTCE_LOC]], i32* null, i32 0)
-; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, i32* [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]], ptr null, i32 0)
+; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[DOTCE_RELOAD]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[E_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[C_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[E_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(i32* [[TMP0]], i32* [[C_LOC]], i32* [[E_LOC]], i32 1)
-; CHECK-NEXT: [[C_RELOAD:%.*]] = load i32, i32* [[C_LOC]], align 4
-; CHECK-NEXT: [[E_RELOAD:%.*]] = load i32, i32* [[E_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[C_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[C_LOC]], ptr [[E_LOC]], i32 1)
+; CHECK-NEXT: [[C_RELOAD:%.*]] = load i32, ptr [[C_LOC]], align 4
+; CHECK-NEXT: [[E_RELOAD:%.*]] = load i32, ptr [[E_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[C_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
-; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[TMP0:%.*]], align 4
+; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0:%.*]], align 4
; CHECK-NEXT: br label [[TEST1:%.*]]
; CHECK: test1:
-; CHECK-NEXT: [[E:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[E:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: br i1 true, label [[FIRST_SPLIT:%.*]], label [[TEST:%.*]]
; CHECK: test:
-; CHECK-NEXT: [[D:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[D:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: br i1 true, label [[FIRST_SPLIT]], label [[NEXT_EXITSTUB:%.*]]
; CHECK: first.split:
; CHECK-NEXT: [[DOTCE:%.*]] = phi i32 [ [[C]], [[TEST]] ], [ [[E]], [[TEST1]] ]
; CHECK-NEXT: i32 0, label [[OUTPUT_BLOCK_1_0:%.*]]
; CHECK-NEXT: ]
; CHECK: output_block_0_1:
-; CHECK-NEXT: store i32 [[DOTCE]], i32* [[TMP1:%.*]], align 4
+; CHECK-NEXT: store i32 [[DOTCE]], ptr [[TMP1:%.*]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_1]]
; CHECK: output_block_1_0:
-; CHECK-NEXT: store i32 [[C]], i32* [[TMP1]], align 4
-; CHECK-NEXT: store i32 [[E]], i32* [[TMP2:%.*]], align 4
+; CHECK-NEXT: store i32 [[C]], ptr [[TMP1]], align 4
+; CHECK-NEXT: store i32 [[E]], ptr [[TMP2:%.*]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_0]]
; CHECK: output_block_1_1:
-; CHECK-NEXT: store i32 [[C]], i32* [[TMP1]], align 4
-; CHECK-NEXT: store i32 [[E]], i32* [[TMP2]], align 4
+; CHECK-NEXT: store i32 [[C]], ptr [[TMP1]], align 4
+; CHECK-NEXT: store i32 [[E]], ptr [[TMP2]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_1]]
; CHECK: final_block_0:
; CHECK-NEXT: ret i1 false
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[D:%.*]] = alloca i1, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i1* [[DL_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @outlined_ir_func_3(i1 true, i1* [[D]], i1* [[DL_LOC]])
-; CHECK-NEXT: [[DL_RELOAD:%.*]] = load i1, i1* [[DL_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DL_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_3(i1 true, ptr [[D]], ptr [[DL_LOC]])
+; CHECK-NEXT: [[DL_RELOAD:%.*]] = load i1, ptr [[DL_LOC]], align 1
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DL_LOC]])
; CHECK-NEXT: [[SPLIT_INST:%.*]] = sub i1 [[DL_RELOAD]], [[DL_RELOAD]]
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: call void @llvm.assume(i1 [[DL_RELOAD]])
-; CHECK-NEXT: call void @outlined_ir_func_1(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%b = alloca i32, align 4
%c = alloca i32, align 4
%d = alloca i1, align 4
- store i1 1, i1* %d, align 4
- %dl = load i1, i1* %d
+ store i1 1, ptr %d, align 4
+ %dl = load i1, ptr %d
%split_inst = sub i1 %dl, %dl
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
call void @llvm.assume(i1 %dl)
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[D:%.*]] = alloca i1, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i1* [[DL_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @outlined_ir_func_3(i1 false, i1* [[D]], i1* [[DL_LOC]])
-; CHECK-NEXT: [[DL_RELOAD:%.*]] = load i1, i1* [[DL_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DL_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_3(i1 false, ptr [[D]], ptr [[DL_LOC]])
+; CHECK-NEXT: [[DL_RELOAD:%.*]] = load i1, ptr [[DL_LOC]], align 1
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DL_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: call void @llvm.assume(i1 [[DL_RELOAD]])
-; CHECK-NEXT: call void @outlined_ir_func_1(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%b = alloca i32, align 4
%c = alloca i32, align 4
%d = alloca i1, align 4
- store i1 0, i1* %d, align 4
- %dl = load i1, i1* %d
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
+ store i1 0, ptr %d, align 4
+ %dl = load i1, ptr %d
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
call void @llvm.assume(i1 %dl)
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[D:%.*]] = alloca i1, align 4
-; CHECK-NEXT: store i1 true, i1* [[D]], align 4
-; CHECK-NEXT: [[DL:%.*]] = load i1, i1* [[D]], align 1
+; CHECK-NEXT: store i1 true, ptr [[D]], align 4
+; CHECK-NEXT: [[DL:%.*]] = load i1, ptr [[D]], align 1
; CHECK-NEXT: [[SPLIT_INST:%.*]] = add i1 [[DL]], [[DL]]
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: call void @llvm.assume(i1 [[DL]])
-; CHECK-NEXT: call void @outlined_ir_func_2(i32* [[A]])
+; CHECK-NEXT: call void @outlined_ir_func_2(ptr [[A]])
; CHECK-NEXT: ret void
;
entry:
%b = alloca i32, align 4
%c = alloca i32, align 4
%d = alloca i1, align 4
- store i1 1, i1* %d, align 4
- %dl = load i1, i1* %d
+ store i1 1, ptr %d, align 4
+ %dl = load i1, ptr %d
%split_inst = add i1 %dl, %dl
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
call void @llvm.assume(i1 %dl)
- %al = load i32, i32* %a
+ %al = load i32, ptr %a
%bl = add i32 %al, %al
ret void
}
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[D:%.*]] = alloca i1, align 4
-; CHECK-NEXT: store i1 false, i1* [[D]], align 4
-; CHECK-NEXT: [[DL:%.*]] = load i1, i1* [[D]], align 1
+; CHECK-NEXT: store i1 false, ptr [[D]], align 4
+; CHECK-NEXT: [[DL:%.*]] = load i1, ptr [[D]], align 1
; CHECK-NEXT: [[SPLIT_INST:%.*]] = add i1 [[DL]], [[DL]]
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: call void @llvm.assume(i1 [[DL]])
-; CHECK-NEXT: call void @outlined_ir_func_2(i32* [[A]])
+; CHECK-NEXT: call void @outlined_ir_func_2(ptr [[A]])
; CHECK-NEXT: ret void
;
entry:
%b = alloca i32, align 4
%c = alloca i32, align 4
%d = alloca i1, align 4
- store i1 0, i1* %d, align 4
- %dl = load i1, i1* %d
+ store i1 0, ptr %d, align 4
+ %dl = load i1, ptr %d
%split_inst = add i1 %dl, %dl
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
call void @llvm.assume(i1 %dl)
- %al = load i32, i32* %a
+ %al = load i32, ptr %a
%bl = add i32 %al, %al
ret void
}
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: br label [[NEXT:%.*]]
; CHECK: next:
; CHECK-NEXT: ret void
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
br label %next
next:
ret void
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: br label [[NEXT:%.*]]
; CHECK: next:
; CHECK-NEXT: ret void
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
br label %next
next:
ret void
; CHECK-NEXT: [[CS1:%.*]] = catchswitch within none [label %catchpad1] unwind to caller
; CHECK: catchpad1:
; CHECK-NEXT: [[TMP0:%.*]] = catchpad within [[CS1]] []
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]])
; CHECK-NEXT: br label [[NORMAL]]
; CHECK: normal:
; CHECK-NEXT: ret void
%cs1 = catchswitch within none [label %catchpad1] unwind to caller
catchpad1:
catchpad within %cs1 []
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
br label %normal
normal:
ret void
; CHECK-NEXT: [[CS1:%.*]] = catchswitch within none [label %catchpad1] unwind to caller
; CHECK: catchpad1:
; CHECK-NEXT: [[TMP0:%.*]] = catchpad within [[CS1]] []
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]])
; CHECK-NEXT: br label [[NORMAL]]
; CHECK: normal:
; CHECK-NEXT: ret void
%cs1 = catchswitch within none [label %catchpad1] unwind to caller
catchpad1:
catchpad within %cs1 []
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
br label %normal
normal:
ret void
; CHECK-NEXT: to label [[NORMAL:%.*]] unwind label [[EXCEPTION:%.*]]
; CHECK: exception:
; CHECK-NEXT: [[CLEAN:%.*]] = cleanuppad within none []
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]])
; CHECK-NEXT: br label [[NORMAL]]
; CHECK: normal:
; CHECK-NEXT: ret void
invoke void @llvm.donothing() to label %normal unwind label %exception
exception:
%clean = cleanuppad within none []
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
br label %normal
normal:
ret void
; CHECK-NEXT: to label [[NORMAL:%.*]] unwind label [[EXCEPTION:%.*]]
; CHECK: exception:
; CHECK-NEXT: [[CLEAN:%.*]] = cleanuppad within none []
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]])
; CHECK-NEXT: br label [[NORMAL]]
; CHECK: normal:
; CHECK-NEXT: ret void
invoke void @llvm.donothing() to label %normal unwind label %exception
exception:
%clean = cleanuppad within none []
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
br label %normal
normal:
ret void
; Show that we do not extract freeze instructions, since extra handling is
; required to mark any outputs used with freeze.
-define void @function1(i32* %a, i32* %b) {
+define void @function1(ptr %a, ptr %b) {
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: first:
-; CHECK-NEXT: [[C:%.*]] = freeze i32* [[A:%.*]]
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[C]], i32* [[B:%.*]])
+; CHECK-NEXT: [[C:%.*]] = freeze ptr [[A:%.*]]
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[C]], ptr [[B:%.*]])
; CHECK-NEXT: ret void
; CHECK: next:
; CHECK-NEXT: br label [[FIRST]]
entry:
br label %first
first:
; The CHECK lines above show the freeze remains in the caller: the outlined
; function receives the already-frozen pointer; the freeze itself is not
; extracted.
- %c = freeze i32* %a
- store i32 2, i32* %c, align 4
- store i32 3, i32* %b, align 4
+ %c = freeze ptr %a
+ store i32 2, ptr %c, align 4
+ store i32 3, ptr %b, align 4
ret void
next:
br label %first
}
-define void @function2(i32* %a, i32* %b) {
+define void @function2(ptr %a, ptr %b) {
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: first:
-; CHECK-NEXT: [[C:%.*]] = freeze i32* [[A:%.*]]
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[C]], i32* [[B:%.*]])
+; CHECK-NEXT: [[C:%.*]] = freeze ptr [[A:%.*]]
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[C]], ptr [[B:%.*]])
; CHECK-NEXT: ret void
; CHECK: next:
; CHECK-NEXT: br label [[FIRST]]
entry:
br label %first
first:
- %c = freeze i32* %a
- store i32 2, i32* %c, align 4
- store i32 3, i32* %b, align 4
+ %c = freeze ptr %a
+ store i32 2, ptr %c, align 4
+ store i32 3, ptr %b, align 4
ret void
next:
br label %first
; This test checks that we do not outline indirect calls when it is specified
; that we should not.
-declare void @f1(i32*, i32*);
-declare void @f2(i32*, i32*);
+declare void @f1(ptr, ptr);
+declare void @f2(ptr, ptr);
-define void @function1(void()* %func) {
+define void @function1(ptr %func) {
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_1(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: call void [[FUNC:%.*]]()
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
; The CHECK lines above show the indirect call stays in the caller while the
; store and load regions on either side of it are outlined separately.
call void %func()
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; Identical body to @function1 above; both callers keep the indirect call
; inline and share the outlined store/load regions around it.
-define void @function2(void()* %func) {
+define void @function2(ptr %func) {
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_1(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: call void [[FUNC:%.*]]()
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
call void %func()
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]])
; CHECK-NEXT: invoke void @llvm.donothing()
; CHECK-NEXT: to label [[NORMAL:%.*]] unwind label [[EXCEPTION:%.*]]
; CHECK: exception:
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
invoke void @llvm.donothing() to label %normal unwind label %exception
exception:
%cleanup = landingpad i8 cleanup
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]])
; CHECK-NEXT: invoke void @llvm.donothing()
; CHECK-NEXT: to label [[NORMAL:%.*]] unwind label [[EXCEPTION:%.*]]
; CHECK: exception:
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
invoke void @llvm.donothing() to label %normal unwind label %exception
exception:
%cleanup = landingpad i8 cleanup
; CHECK: exception:
; CHECK-NEXT: [[CLEANUP:%.*]] = landingpad i8
; CHECK-NEXT: cleanup
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]])
; CHECK-NEXT: br label [[NORMAL]]
; CHECK: normal:
; CHECK-NEXT: ret void
invoke void @llvm.donothing() to label %normal unwind label %exception
exception:
%cleanup = landingpad i8 cleanup
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
br label %normal
normal:
ret void
; CHECK: exception:
; CHECK-NEXT: [[CLEANUP:%.*]] = landingpad i8
; CHECK-NEXT: cleanup
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]])
; CHECK-NEXT: br label [[NORMAL]]
; CHECK: normal:
; CHECK-NEXT: ret void
invoke void @llvm.donothing() to label %normal unwind label %exception
exception:
%cleanup = landingpad i8 cleanup
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
br label %normal
normal:
ret void
; This test checks that we do not outline memcpy intrinsics since they may
; require extra address space checks.
; memcpy test: the @llvm.memcpy call is never pulled into an outlined region;
; the two loads before it and the add/load/ret sequence after it are outlined
; instead, with outputs passed back through lifetime-marked stack slots.
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
-define i8 @function1(i8* noalias %s, i8* noalias %d, i64 %len) {
+define i8 @function1(ptr noalias %s, ptr noalias %d, i64 %len) {
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[B_LOC]])
-; CHECK-NEXT: call void @outlined_ir_func_1(i8* [[S:%.*]], i8* [[D:%.*]], i8* [[A_LOC]], i8* [[B_LOC]])
-; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, i8* [[A_LOC]], align 1
-; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, i8* [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[B_LOC]])
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[D]], i8* [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[RET_LOC]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], i8* [[S]], i8* [[RET_LOC]])
-; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, i8* [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
+; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
+; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
+; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
entry:
- %a = load i8, i8* %s
- %b = load i8, i8* %d
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %len, i1 false)
+ %a = load i8, ptr %s
+ %b = load i8, ptr %d
+ call void @llvm.memcpy.p0.p0.i64(ptr %d, ptr %s, i64 %len, i1 false)
%c = add i8 %a, %b
- %ret = load i8, i8* %s
+ %ret = load i8, ptr %s
ret i8 %ret
}
; Identical twin of function1 above so the regions around the memcpy call
; deduplicate into the same outlined functions.
-define i8 @function2(i8* noalias %s, i8* noalias %d, i64 %len) {
+define i8 @function2(ptr noalias %s, ptr noalias %d, i64 %len) {
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[B_LOC]])
-; CHECK-NEXT: call void @outlined_ir_func_1(i8* [[S:%.*]], i8* [[D:%.*]], i8* [[A_LOC]], i8* [[B_LOC]])
-; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, i8* [[A_LOC]], align 1
-; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, i8* [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[B_LOC]])
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[D]], i8* [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[RET_LOC]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], i8* [[S]], i8* [[RET_LOC]])
-; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, i8* [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
+; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
+; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
+; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
entry:
- %a = load i8, i8* %s
- %b = load i8, i8* %d
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %len, i1 false)
+ %a = load i8, ptr %s
+ %b = load i8, ptr %d
+ call void @llvm.memcpy.p0.p0.i64(ptr %d, ptr %s, i64 %len, i1 false)
%c = add i8 %a, %b
- %ret = load i8, i8* %s
+ %ret = load i8, ptr %s
ret i8 %ret
}
; This test checks that we do not outline memmove intrinsics since they may
; require extra address space checks.
; memmove test: mirrors the memcpy case above — @llvm.memmove stays inline
; while the surrounding load/add regions are outlined.
-declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1)
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
-define i8 @function1(i8* noalias %s, i8* noalias %d, i64 %len) {
+define i8 @function1(ptr noalias %s, ptr noalias %d, i64 %len) {
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[B_LOC]])
-; CHECK-NEXT: call void @outlined_ir_func_1(i8* [[S:%.*]], i8* [[D:%.*]], i8* [[A_LOC]], i8* [[B_LOC]])
-; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, i8* [[A_LOC]], align 1
-; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, i8* [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[B_LOC]])
-; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* [[D]], i8* [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[RET_LOC]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], i8* [[S]], i8* [[RET_LOC]])
-; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, i8* [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
+; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
+; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
+; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
entry:
- %a = load i8, i8* %s
- %b = load i8, i8* %d
- call void @llvm.memmove.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %len, i1 false)
+ %a = load i8, ptr %s
+ %b = load i8, ptr %d
+ call void @llvm.memmove.p0.p0.i64(ptr %d, ptr %s, i64 %len, i1 false)
%c = add i8 %a, %b
- %ret = load i8, i8* %s
+ %ret = load i8, ptr %s
ret i8 %ret
}
; Identical twin of function1 above so the regions around the memmove call
; deduplicate into the same outlined functions.
-define i8 @function2(i8* noalias %s, i8* noalias %d, i64 %len) {
+define i8 @function2(ptr noalias %s, ptr noalias %d, i64 %len) {
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[B_LOC]])
-; CHECK-NEXT: call void @outlined_ir_func_1(i8* [[S:%.*]], i8* [[D:%.*]], i8* [[A_LOC]], i8* [[B_LOC]])
-; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, i8* [[A_LOC]], align 1
-; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, i8* [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[B_LOC]])
-; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* [[D]], i8* [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[RET_LOC]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], i8* [[S]], i8* [[RET_LOC]])
-; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, i8* [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
+; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
+; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
+; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
entry:
- %a = load i8, i8* %s
- %b = load i8, i8* %d
- call void @llvm.memmove.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %len, i1 false)
+ %a = load i8, ptr %s
+ %b = load i8, ptr %d
+ call void @llvm.memmove.p0.p0.i64(ptr %d, ptr %s, i64 %len, i1 false)
%c = add i8 %a, %b
- %ret = load i8, i8* %s
+ %ret = load i8, ptr %s
ret i8 %ret
}
; This test checks that we do not outline memset intrinsics since they require
; extra address space checks.
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i32, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i32, i1)
define i64 @function1(i64 %x, i64 %z, i64 %n) {
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[POOL:%.*]] = alloca [59 x i64], align 4
-; CHECK-NEXT: [[TMP:%.*]] = bitcast [59 x i64]* [[POOL]] to i8*
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* nonnull align 4 [[TMP]], i8 0, i64 236, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr nonnull align 4 [[POOL]], i8 0, i64 236, i1 false)
; CHECK-NEXT: call void @outlined_ir_func_0(i64 [[N:%.*]], i64 [[X:%.*]], i64 [[Z:%.*]])
; CHECK-NEXT: ret i64 0
;
entry:
%pool = alloca [59 x i64], align 4
- %tmp = bitcast [59 x i64]* %pool to i8*
- call void @llvm.memset.p0i8.i64(i8* nonnull %tmp, i8 0, i64 236, i32 4, i1 false)
+ call void @llvm.memset.p0.i64(ptr nonnull %pool, i8 0, i64 236, i32 4, i1 false)
%cmp3 = icmp eq i64 %n, 0
%a = add i64 %x, %z
%c = add i64 %x, %z
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[POOL:%.*]] = alloca [59 x i64], align 4
-; CHECK-NEXT: [[TMP:%.*]] = bitcast [59 x i64]* [[POOL]] to i8*
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* nonnull align 4 [[TMP]], i8 0, i64 236, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr nonnull align 4 [[POOL]], i8 0, i64 236, i1 false)
; CHECK-NEXT: call void @outlined_ir_func_0(i64 [[N:%.*]], i64 [[X:%.*]], i64 [[Z:%.*]])
; CHECK-NEXT: ret i64 0
;
entry:
%pool = alloca [59 x i64], align 4
- %tmp = bitcast [59 x i64]* %pool to i8*
- call void @llvm.memset.p0i8.i64(i8* nonnull %tmp, i8 0, i64 236, i32 4, i1 false)
+ call void @llvm.memset.p0.i64(ptr nonnull %pool, i8 0, i64 236, i32 4, i1 false)
%cmp3 = icmp eq i64 %n, 0
%a = add i64 %x, %z
%c = add i64 %x, %z
; Show that we do not extract phi nodes as it would require extra label and
; control flow checking.
; PHI-node test: the phi in %first is left in place; only the two stores
; following it are extracted into @outlined_ir_func_0.
-define void @function1(i32* %a, i32* %b) {
+define void @function1(ptr %a, ptr %b) {
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ 3, [[NEXT:%.*]] ]
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A:%.*]], i32* [[B:%.*]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A:%.*]], ptr [[B:%.*]])
; CHECK-NEXT: ret void
; CHECK: next:
; CHECK-NEXT: br label [[FIRST]]
br label %first
first:
%0 = phi i32 [ 0, %entry ], [ 3, %next ]
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
ret void
next:
br label %first
}
-define void @function2(i32* %a, i32* %b) {
+define void @function2(ptr %a, ptr %b) {
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ 3, [[NEXT:%.*]] ]
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A:%.*]], i32* [[B:%.*]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A:%.*]], ptr [[B:%.*]])
; CHECK-NEXT: ret void
; CHECK: next:
; CHECK-NEXT: br label [[FIRST]]
br label %first
first:
%0 = phi i32 [ 0, %entry ], [ 3, %next ]
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
ret void
next:
br label %first
; outliner behaves, causing miscompiles.
; Function Attrs: optsize returns_twice
; setjmp test: per the assertions below, the stores before the returns_twice
; @setjmp call remain inline and only the loads after it are outlined.
-declare i32 @setjmp(i32*) local_unnamed_addr #1
+declare i32 @setjmp(ptr) local_unnamed_addr #1
@tmp_jmpb = global [37 x i32] zeroinitializer, align 16
define void @function1() {
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 2, i32* [[A]], align 4
-; CHECK-NEXT: store i32 3, i32* [[B]], align 4
-; CHECK-NEXT: store i32 4, i32* [[C]], align 4
-; CHECK-NEXT: [[CALL:%.*]] = call i32 @setjmp(i32* getelementptr inbounds ([37 x i32], [37 x i32]* @tmp_jmpb, i64 0, i64 0))
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: store i32 2, ptr [[A]], align 4
+; CHECK-NEXT: store i32 3, ptr [[B]], align 4
+; CHECK-NEXT: store i32 4, ptr [[C]], align 4
+; CHECK-NEXT: [[CALL:%.*]] = call i32 @setjmp(ptr @tmp_jmpb)
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %call = call i32 @setjmp(i32* getelementptr inbounds ([37 x i32], [37 x i32]* @tmp_jmpb, i64 0, i64 0))
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %call = call i32 @setjmp(ptr @tmp_jmpb)
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 2, i32* [[A]], align 4
-; CHECK-NEXT: store i32 3, i32* [[B]], align 4
-; CHECK-NEXT: store i32 4, i32* [[C]], align 4
-; CHECK-NEXT: [[CALL:%.*]] = call i32 @setjmp(i32* getelementptr inbounds ([37 x i32], [37 x i32]* @tmp_jmpb, i64 0, i64 0))
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: store i32 2, ptr [[A]], align 4
+; CHECK-NEXT: store i32 3, ptr [[B]], align 4
+; CHECK-NEXT: store i32 4, ptr [[C]], align 4
+; CHECK-NEXT: [[CALL:%.*]] = call i32 @setjmp(ptr @tmp_jmpb)
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %call = call i32 @setjmp(i32* getelementptr inbounds ([37 x i32], [37 x i32]* @tmp_jmpb, i64 0, i64 0))
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %call = call i32 @setjmp(ptr @tmp_jmpb)
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; This test ensures that we do not outline vararg instructions or intrinsics, as
; they may cause inconsistencies when outlining.
; Variadic test: va_start/va_arg/va_copy/va_end are never outlined; the plain
; store region before them and the store/load region after them are extracted
; into @outlined_ir_func_0 and @outlined_ir_func_1 respectively.
-declare void @llvm.va_start(i8*)
-declare void @llvm.va_copy(i8*, i8*)
-declare void @llvm.va_end(i8*)
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_copy(ptr, ptr)
+declare void @llvm.va_end(ptr)
-define i32 @func1(i32 %a, double %b, i8* %v, ...) nounwind {
+define i32 @func1(i32 %a, double %b, ptr %v, ...) nounwind {
; CHECK-LABEL: @func1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[AP1_LOC:%.*]] = alloca i8*, align 8
+; CHECK-NEXT: [[TMP_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B_ADDR:%.*]] = alloca double, align 8
-; CHECK-NEXT: [[AP:%.*]] = alloca i8*, align 4
+; CHECK-NEXT: [[AP:%.*]] = alloca ptr, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i8** [[AP1_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[A:%.*]], i32* [[A_ADDR]], double [[B:%.*]], double* [[B_ADDR]], i8** [[AP]], i8** [[AP1_LOC]])
-; CHECK-NEXT: [[AP1_RELOAD:%.*]] = load i8*, i8** [[AP1_LOC]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @llvm.va_start(i8* [[AP1_RELOAD]])
-; CHECK-NEXT: [[TMP0:%.*]] = va_arg i8** [[AP]], i32
-; CHECK-NEXT: call void @llvm.va_copy(i8* [[V:%.*]], i8* [[AP1_RELOAD]])
-; CHECK-NEXT: call void @llvm.va_end(i8* [[AP1_RELOAD]])
-; CHECK-NEXT: store i32 [[TMP0]], i32* [[C]], align 4
-; CHECK-NEXT: [[TMP:%.*]] = load i32, i32* [[C]], align 4
-; CHECK-NEXT: ret i32 [[TMP]]
+; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[A:%.*]], ptr [[A_ADDR]], double [[B:%.*]], ptr [[B_ADDR]])
+; CHECK-NEXT: call void @llvm.va_start(ptr [[AP]])
+; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
+; CHECK-NEXT: call void @llvm.va_copy(ptr [[V:%.*]], ptr [[AP]])
+; CHECK-NEXT: call void @llvm.va_end(ptr [[AP]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
+; CHECK-NEXT: [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: ret i32 [[TMP_RELOAD]]
;
entry:
%a.addr = alloca i32, align 4
%b.addr = alloca double, align 8
- %ap = alloca i8*, align 4
+ %ap = alloca ptr, align 4
%c = alloca i32, align 4
- store i32 %a, i32* %a.addr, align 4
- store double %b, double* %b.addr, align 8
- %ap1 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap1)
- %0 = va_arg i8** %ap, i32
- call void @llvm.va_copy(i8* %v, i8* %ap1)
- call void @llvm.va_end(i8* %ap1)
- store i32 %0, i32* %c, align 4
- %tmp = load i32, i32* %c, align 4
+ store i32 %a, ptr %a.addr, align 4
+ store double %b, ptr %b.addr, align 8
+ call void @llvm.va_start(ptr %ap)
+ %0 = va_arg ptr %ap, i32
+ call void @llvm.va_copy(ptr %v, ptr %ap)
+ call void @llvm.va_end(ptr %ap)
+ store i32 %0, ptr %c, align 4
+ %tmp = load i32, ptr %c, align 4
ret i32 %tmp
}
; Twin of func1; its old typed-pointer form had one extra unused bitcast
; (%ap2), which disappears entirely under opaque pointers, so the regions
; around the variadic intrinsics still deduplicate with func1's.
-define i32 @func2(i32 %a, double %b, i8* %v, ...) nounwind {
+define i32 @func2(i32 %a, double %b, ptr %v, ...) nounwind {
; CHECK-LABEL: @func2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[AP1_LOC:%.*]] = alloca i8*, align 8
+; CHECK-NEXT: [[TMP_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B_ADDR:%.*]] = alloca double, align 8
-; CHECK-NEXT: [[AP:%.*]] = alloca i8*, align 4
+; CHECK-NEXT: [[AP:%.*]] = alloca ptr, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i8** [[AP1_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[A:%.*]], i32* [[A_ADDR]], double [[B:%.*]], double* [[B_ADDR]], i8** [[AP]], i8** [[AP1_LOC]])
-; CHECK-NEXT: [[AP1_RELOAD:%.*]] = load i8*, i8** [[AP1_LOC]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @llvm.va_start(i8* [[AP1_RELOAD]])
-; CHECK-NEXT: [[TMP0:%.*]] = va_arg i8** [[AP]], i32
-; CHECK-NEXT: call void @llvm.va_copy(i8* [[V:%.*]], i8* [[AP1_RELOAD]])
-; CHECK-NEXT: call void @llvm.va_end(i8* [[AP1_RELOAD]])
-; CHECK-NEXT: store i32 [[TMP0]], i32* [[C]], align 4
-; CHECK-NEXT: [[AP2:%.*]] = bitcast i8** [[AP]] to i8*
-; CHECK-NEXT: [[TMP:%.*]] = load i32, i32* [[C]], align 4
-; CHECK-NEXT: ret i32 [[TMP]]
+; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[A:%.*]], ptr [[A_ADDR]], double [[B:%.*]], ptr [[B_ADDR]])
+; CHECK-NEXT: call void @llvm.va_start(ptr [[AP]])
+; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
+; CHECK-NEXT: call void @llvm.va_copy(ptr [[V:%.*]], ptr [[AP]])
+; CHECK-NEXT: call void @llvm.va_end(ptr [[AP]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
+; CHECK-NEXT: [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: ret i32 [[TMP_RELOAD]]
;
entry:
%a.addr = alloca i32, align 4
%b.addr = alloca double, align 8
- %ap = alloca i8*, align 4
+ %ap = alloca ptr, align 4
%c = alloca i32, align 4
- store i32 %a, i32* %a.addr, align 4
- store double %b, double* %b.addr, align 8
- %ap1 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap1)
- %0 = va_arg i8** %ap, i32
- call void @llvm.va_copy(i8* %v, i8* %ap1)
- call void @llvm.va_end(i8* %ap1)
- store i32 %0, i32* %c, align 4
- %ap2 = bitcast i8** %ap to i8*
- %tmp = load i32, i32* %c, align 4
+ store i32 %a, ptr %a.addr, align 4
+ store double %b, ptr %b.addr, align 8
+ call void @llvm.va_start(ptr %ap)
+ %0 = va_arg ptr %ap, i32
+ call void @llvm.va_copy(ptr %v, ptr %ap)
+ call void @llvm.va_end(ptr %ap)
+ store i32 %0, ptr %c, align 4
+ %tmp = load i32, ptr %c, align 4
ret i32 %tmp
}
; block are included in the region and there is no more than one predecessor
; into those phi nodes from outside of the region.
; Split-PHI test: the phis in %test1 take values only from inside the
; candidate region (%test1 itself) plus a single outside predecessor
; (%entry), so the region can still be outlined.
-define void @function1(i32* %a, i32* %b) {
+define void @function1(ptr %a, ptr %b) {
entry:
%0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
%y = add i32 %c, %c
br label %test1
dummy:
test1:
%1 = phi i32 [ %e, %test1 ], [ %y, %entry ]
%2 = phi i32 [ %e, %test1 ], [ %y, %entry ]
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
%3 = add i32 %c, %c
br i1 true, label %test, label %test1
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
br label %first
first:
ret void
}
-define void @function2(i32* %a, i32* %b) {
+define void @function2(ptr %a, ptr %b) {
entry:
%0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
%y = mul i32 %c, %c
br label %test1
dummy:
test1:
%1 = phi i32 [ %e, %test1 ], [ %y, %entry ]
%2 = phi i32 [ %e, %test1 ], [ %y, %entry ]
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
%3 = add i32 %c, %c
br i1 true, label %test, label %test1
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
br label %first
first:
ret void
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: [[Y:%.*]] = add i32 [[C]], [[C]]
; CHECK-NEXT: br label [[TEST1:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
; CHECK: test1:
-; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[Y]], i32* [[TMP0]], i32 [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[Y]], ptr [[TMP0]], i32 [[C]])
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: [[Y:%.*]] = mul i32 [[C]], [[C]]
; CHECK-NEXT: br label [[TEST1:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
; CHECK: test1:
-; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[Y]], i32* [[TMP0]], i32 [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[Y]], ptr [[TMP0]], i32 [[C]])
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
; CHECK: test1_to_outline:
; CHECK-NEXT: [[TMP3:%.*]] = phi i32 [ [[E:%.*]], [[TEST1_TO_OUTLINE]] ], [ [[TMP0:%.*]], [[NEWFUNCROOT:%.*]] ]
; CHECK-NEXT: [[TMP4:%.*]] = phi i32 [ [[E]], [[TEST1_TO_OUTLINE]] ], [ [[TMP0]], [[NEWFUNCROOT]] ]
-; CHECK-NEXT: [[E]] = load i32, i32* [[TMP1:%.*]], align 4
+; CHECK-NEXT: [[E]] = load i32, ptr [[TMP1:%.*]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[TMP2:%.*]], [[TMP2]]
; CHECK-NEXT: br i1 true, label [[TEST:%.*]], label [[TEST1_TO_OUTLINE]]
; CHECK: test:
-; CHECK-NEXT: [[D:%.*]] = load i32, i32* [[TMP1]], align 4
+; CHECK-NEXT: [[D:%.*]] = load i32, ptr [[TMP1]], align 4
; CHECK-NEXT: br label [[FIRST_EXITSTUB:%.*]]
; CHECK: first.exitStub:
; CHECK-NEXT: ret void
; Show that we are able to propagate inputs to the region into the split PHINode
; outside of the region if necessary.
; %c reaches the phi in %first from %entry, i.e. from outside the outlined
; region, so the outliner must propagate it into the split phi node.
-define void @function1(i32* %a, i32* %b) {
+define void @function1(ptr %a, ptr %b) {
entry:
%0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
%z = add i32 %c, %c
br i1 true, label %test1, label %first
test1:
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
%1 = add i32 %c, %c
br i1 true, label %first, label %test
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
br i1 true, label %first, label %next
first:
%2 = phi i32 [ %d, %test ], [ %e, %test1 ], [ %c, %entry ]
ret void
}
-define void @function2(i32* %a, i32* %b) {
+define void @function2(ptr %a, ptr %b) {
entry:
%0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
%z = mul i32 %c, %c
br i1 true, label %test1, label %first
test1:
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
%1 = add i32 %c, %c
br i1 true, label %first, label %test
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
br i1 true, label %first, label %next
first:
%2 = phi i32 [ %d, %test ], [ %e, %test1 ], [ %c, %entry ]
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: [[Z:%.*]] = add i32 [[C]], [[C]]
-; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(i32* [[TMP0]], i32 [[C]])
+; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[NEXT:%.*]], label [[ENTRY_AFTER_OUTLINE:%.*]]
; CHECK: entry_after_outline:
; CHECK-NEXT: ret void
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: [[Z:%.*]] = mul i32 [[C]], [[C]]
-; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(i32* [[TMP0]], i32 [[C]])
+; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[NEXT:%.*]], label [[ENTRY_AFTER_OUTLINE:%.*]]
; CHECK: entry_after_outline:
; CHECK-NEXT: ret void
; CHECK: entry_to_outline:
; CHECK-NEXT: br i1 true, label [[TEST1:%.*]], label [[FIRST:%.*]]
; CHECK: test1:
-; CHECK-NEXT: [[E:%.*]] = load i32, i32* [[TMP0:%.*]], align 4
+; CHECK-NEXT: [[E:%.*]] = load i32, ptr [[TMP0:%.*]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 true, label [[FIRST]], label [[TEST:%.*]]
; CHECK: test:
-; CHECK-NEXT: [[D:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[D:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: br i1 true, label [[FIRST]], label [[NEXT_EXITSTUB:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP3:%.*]] = phi i32 [ [[D]], [[TEST]] ], [ [[E]], [[TEST1]] ], [ [[TMP1]], [[ENTRY_TO_OUTLINE]] ]
define void @function1() !dbg !6 {
entry:
%a = alloca i32, align 4, !dbg !17
- call void @llvm.dbg.value(metadata i32* %a, metadata !9, metadata !DIExpression()), !dbg !17
+ call void @llvm.dbg.value(metadata ptr %a, metadata !9, metadata !DIExpression()), !dbg !17
%b = alloca i32, align 4, !dbg !18
- call void @llvm.dbg.value(metadata i32* %b, metadata !11, metadata !DIExpression()), !dbg !18
+ call void @llvm.dbg.value(metadata ptr %b, metadata !11, metadata !DIExpression()), !dbg !18
%c = alloca i32, align 4, !dbg !19
- call void @llvm.dbg.value(metadata i32* %c, metadata !12, metadata !DIExpression()), !dbg !19
- store i32 2, i32* %a, align 4, !dbg !20
- store i32 3, i32* %b, align 4, !dbg !21
- store i32 4, i32* %c, align 4, !dbg !22
- %al = load i32, i32* %a, align 4, !dbg !23
+ call void @llvm.dbg.value(metadata ptr %c, metadata !12, metadata !DIExpression()), !dbg !19
+ store i32 2, ptr %a, align 4, !dbg !20
+ store i32 3, ptr %b, align 4, !dbg !21
+ store i32 4, ptr %c, align 4, !dbg !22
+ %al = load i32, ptr %a, align 4, !dbg !23
call void @llvm.dbg.value(metadata i32 %al, metadata !13, metadata !DIExpression()), !dbg !23
- %bl = load i32, i32* %b, align 4, !dbg !24
+ %bl = load i32, ptr %b, align 4, !dbg !24
call void @llvm.dbg.value(metadata i32 %bl, metadata !15, metadata !DIExpression()), !dbg !24
- %cl = load i32, i32* %c, align 4, !dbg !25
+ %cl = load i32, ptr %c, align 4, !dbg !25
call void @llvm.dbg.value(metadata i32 %cl, metadata !16, metadata !DIExpression()), !dbg !25
ret void, !dbg !26
}
define void @function2() !dbg !27 {
entry:
%a = alloca i32, align 4, !dbg !35
- call void @llvm.dbg.value(metadata i32* %a, metadata !29, metadata !DIExpression()), !dbg !35
+ call void @llvm.dbg.value(metadata ptr %a, metadata !29, metadata !DIExpression()), !dbg !35
%b = alloca i32, align 4, !dbg !36
- call void @llvm.dbg.value(metadata i32* %b, metadata !30, metadata !DIExpression()), !dbg !36
+ call void @llvm.dbg.value(metadata ptr %b, metadata !30, metadata !DIExpression()), !dbg !36
%c = alloca i32, align 4, !dbg !37
- call void @llvm.dbg.value(metadata i32* %c, metadata !31, metadata !DIExpression()), !dbg !37
- store i32 2, i32* %a, align 4, !dbg !38
- store i32 3, i32* %b, align 4, !dbg !39
- store i32 4, i32* %c, align 4, !dbg !40
- %al = load i32, i32* %a, align 4, !dbg !41
+ call void @llvm.dbg.value(metadata ptr %c, metadata !31, metadata !DIExpression()), !dbg !37
+ store i32 2, ptr %a, align 4, !dbg !38
+ store i32 3, ptr %b, align 4, !dbg !39
+ store i32 4, ptr %c, align 4, !dbg !40
+ %al = load i32, ptr %a, align 4, !dbg !41
call void @llvm.dbg.value(metadata i32 %al, metadata !32, metadata !DIExpression()), !dbg !41
- %bl = load i32, i32* %b, align 4, !dbg !42
+ %bl = load i32, ptr %b, align 4, !dbg !42
call void @llvm.dbg.value(metadata i32 %bl, metadata !33, metadata !DIExpression()), !dbg !42
- %cl = load i32, i32* %c, align 4, !dbg !43
+ %cl = load i32, ptr %c, align 4, !dbg !43
call void @llvm.dbg.value(metadata i32 %cl, metadata !34, metadata !DIExpression()), !dbg !43
ret void, !dbg !44
}
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4, !dbg [[DBG17:![0-9]+]]
-; CHECK-NEXT: call void @llvm.dbg.value(metadata i32* [[A]], metadata [[META9:![0-9]+]], metadata !DIExpression()), !dbg [[DBG17]]
+; CHECK-NEXT: call void @llvm.dbg.value(metadata ptr [[A]], metadata [[META9:![0-9]+]], metadata !DIExpression()), !dbg [[DBG17]]
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4, !dbg [[DBG18:![0-9]+]]
-; CHECK-NEXT: call void @llvm.dbg.value(metadata i32* [[B]], metadata [[META11:![0-9]+]], metadata !DIExpression()), !dbg [[DBG18]]
+; CHECK-NEXT: call void @llvm.dbg.value(metadata ptr [[B]], metadata [[META11:![0-9]+]], metadata !DIExpression()), !dbg [[DBG18]]
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4, !dbg [[DBG19:![0-9]+]]
-; CHECK-NEXT: call void @llvm.dbg.value(metadata i32* [[C]], metadata [[META12:![0-9]+]], metadata !DIExpression()), !dbg [[DBG19]]
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]]), !dbg [[DBG20:![0-9]+]]
+; CHECK-NEXT: call void @llvm.dbg.value(metadata ptr [[C]], metadata [[META12:![0-9]+]], metadata !DIExpression()), !dbg [[DBG19]]
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]]), !dbg [[DBG20:![0-9]+]]
; CHECK-NEXT: ret void, !dbg [[DBG21:![0-9]+]]
;
;
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4, !dbg [[DBG30:![0-9]+]]
-; CHECK-NEXT: call void @llvm.dbg.value(metadata i32* [[A]], metadata [[META24:![0-9]+]], metadata !DIExpression()), !dbg [[DBG30]]
+; CHECK-NEXT: call void @llvm.dbg.value(metadata ptr [[A]], metadata [[META24:![0-9]+]], metadata !DIExpression()), !dbg [[DBG30]]
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4, !dbg [[DBG31:![0-9]+]]
-; CHECK-NEXT: call void @llvm.dbg.value(metadata i32* [[B]], metadata [[META25:![0-9]+]], metadata !DIExpression()), !dbg [[DBG31]]
+; CHECK-NEXT: call void @llvm.dbg.value(metadata ptr [[B]], metadata [[META25:![0-9]+]], metadata !DIExpression()), !dbg [[DBG31]]
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4, !dbg [[DBG32:![0-9]+]]
-; CHECK-NEXT: call void @llvm.dbg.value(metadata i32* [[C]], metadata [[META26:![0-9]+]], metadata !DIExpression()), !dbg [[DBG32]]
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]]), !dbg [[DBG33:![0-9]+]]
+; CHECK-NEXT: call void @llvm.dbg.value(metadata ptr [[C]], metadata [[META26:![0-9]+]], metadata !DIExpression()), !dbg [[DBG32]]
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]]), !dbg [[DBG33:![0-9]+]]
; CHECK-NEXT: ret void, !dbg [[DBG34:![0-9]+]]
;
;
-; CHECK: @outlined_ir_func_0(i32* [[TMP0:%.*]], i32* [[TMP1:%.*]], i32* [[TMP2:%.*]])
+; CHECK: @outlined_ir_func_0(ptr [[TMP0:%.*]], ptr [[TMP1:%.*]], ptr [[TMP2:%.*]])
; CHECK: entry_to_outline:
-; CHECK-NEXT: store i32 2, i32* [[TMP0]], align 4
-; CHECK-NEXT: store i32 3, i32* [[TMP1]], align 4
-; CHECK-NEXT: store i32 4, i32* [[TMP2]], align 4
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[TMP0]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[TMP1]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[TMP2]], align 4
+; CHECK-NEXT: store i32 2, ptr [[TMP0]], align 4
+; CHECK-NEXT: store i32 3, ptr [[TMP1]], align 4
+; CHECK-NEXT: store i32 4, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: br label [[ENTRY_AFTER_OUTLINE_EXITSTUB:%.*]]
;
; This test checks that we do outline indirect calls when it is not specified
; that we should not.
-declare void @f1(i32*, i32*);
-declare void @f2(i32*, i32*);
+declare void @f1(ptr, ptr);
+declare void @f2(ptr, ptr);
-define void @function1(void()* %func) {
+define void @function1(ptr %func) {
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
call void %func()
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
-define void @function2(void()* %func) {
+define void @function2(ptr %func) {
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
call void %func()
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; CHECK-LABEL: @function1(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]], void ()* [[FUNC:%.*]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]], ptr [[FUNC:%.*]])
; CHECK-NEXT: ret void
;
;
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]], void ()* [[FUNC:%.*]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]], ptr [[FUNC:%.*]])
; CHECK-NEXT: ret void
;
;
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
-; CHECK-NEXT: store i32 2, i32* [[TMP0:%.*]], align 4
-; CHECK-NEXT: store i32 3, i32* [[TMP1:%.*]], align 4
-; CHECK-NEXT: store i32 4, i32* [[TMP2:%.*]], align 4
+; CHECK-NEXT: store i32 2, ptr [[TMP0:%.*]], align 4
+; CHECK-NEXT: store i32 3, ptr [[TMP1:%.*]], align 4
+; CHECK-NEXT: store i32 4, ptr [[TMP2:%.*]], align 4
; CHECK-NEXT: call void [[TMP3:%.*]]()
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[TMP0]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[TMP1]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[TMP2]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: br label [[ENTRY_AFTER_OUTLINE_EXITSTUB:%.*]]
; CHECK: entry_after_outline.exitStub:
; CHECK-NEXT: ret void
; Show that we are able to extract blocks that contain PHINodes, and selectively
; store into its respective block, creating a new block if needed.
-define void @function1(i32* %a, i32* %b) {
+define void @function1(ptr %a, ptr %b) {
entry:
%0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
br label %test1
test1:
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
br label %first
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
br label %first
first:
ret void
}
-define void @function2(i32* %a, i32* %b) {
+define void @function2(ptr %a, ptr %b) {
entry:
%0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
br label %test1
test1:
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
br label %first
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
br label %first
first:
%1 = phi i32 [ %c, %test ], [ %e, %test1 ]
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[TMP0]], i32* null, i32 -1)
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], ptr null, i32 -1)
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[DOTCE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[TMP0]], i32* [[DOTCE_LOC]], i32 0)
-; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, i32* [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]], i32 0)
+; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[DOTCE_RELOAD]], [[ENTRY:%.*]] ]
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
-; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[TMP0:%.*]], align 4
+; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0:%.*]], align 4
; CHECK-NEXT: br label [[TEST1:%.*]]
; CHECK: test1:
-; CHECK-NEXT: [[E:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[E:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: br label [[PHI_BLOCK:%.*]]
; CHECK: test:
-; CHECK-NEXT: [[D:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[D:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: br label [[PHI_BLOCK]]
; CHECK: first.exitStub:
; CHECK-NEXT: switch i32 [[TMP2:%.*]], label [[FINAL_BLOCK_0:%.*]] [
; CHECK-NEXT: i32 0, label [[OUTPUT_BLOCK_1_0:%.*]]
; CHECK-NEXT: ]
; CHECK: output_block_1_0:
-; CHECK-NEXT: store i32 [[TMP3:%.*]], i32* [[TMP1:%.*]], align 4
+; CHECK-NEXT: store i32 [[TMP3:%.*]], ptr [[TMP1:%.*]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_0]]
; CHECK: phi_block:
; CHECK-NEXT: [[TMP3]] = phi i32 [ [[C]], [[TEST:%.*]] ], [ [[E]], [[TEST1]] ]
; Show that we are able to extract blocks that contain PHINodes, and selectively
; store into its respective block, only using one if needed.
-define void @function1(i32* %a, i32* %b) {
+define void @function1(ptr %a, ptr %b) {
entry:
%0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
br label %test1
test1:
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
br label %first
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
br label %first
first:
%1 = phi i32 [ %c, %test ], [ %e, %test1 ]
ret void
}
-define void @function2(i32* %a, i32* %b) {
+define void @function2(ptr %a, ptr %b) {
entry:
%0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
br label %test1
test1:
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
br label %first
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
br label %first
first:
ret void
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[DOTCE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[TMP0]], i32* [[DOTCE_LOC]], i32 0)
-; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, i32* [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]], i32 0)
+; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[DOTCE_RELOAD]], [[ENTRY:%.*]] ]
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[TMP0]], i32* null, i32 -1)
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], ptr null, i32 -1)
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
-; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[TMP0:%.*]], align 4
+; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0:%.*]], align 4
; CHECK-NEXT: br label [[TEST1:%.*]]
; CHECK: test1:
-; CHECK-NEXT: [[E:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[E:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: br label [[FIRST_SPLIT:%.*]]
; CHECK: test:
-; CHECK-NEXT: [[D:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[D:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: br label [[FIRST_SPLIT]]
; CHECK: first.split:
; CHECK-NEXT: [[DOTCE:%.*]] = phi i32 [ [[C]], [[TEST:%.*]] ], [ [[E]], [[TEST1]] ]
; CHECK-NEXT: i32 0, label [[OUTPUT_BLOCK_0_0:%.*]]
; CHECK-NEXT: ]
; CHECK: output_block_0_0:
-; CHECK-NEXT: store i32 [[DOTCE]], i32* [[TMP1:%.*]], align 4
+; CHECK-NEXT: store i32 [[DOTCE]], ptr [[TMP1:%.*]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_0]]
; CHECK: final_block_0:
; CHECK-NEXT: ret void
; Show that we do not extract similar regions that would involve the splitting
; of phi nodes on exit.
-define void @function1(i32* %a, i32* %b) {
+define void @function1(ptr %a, ptr %b) {
entry:
%0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
br label %test1
test1:
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
br i1 true, label %first, label %test
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
br i1 true, label %first, label %next
first:
%1 = phi i32 [ %c, %test ], [ %e, %test1 ]
ret void
}
-define void @function2(i32* %a, i32* %b) {
+define void @function2(ptr %a, ptr %b) {
entry:
%0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
br label %test1
test1:
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
br i1 true, label %first, label %test
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
br i1 true, label %first, label %next
first:
ret void
; CHECK-NEXT: [[D_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[E_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[E_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[D_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: [[LT_CAST2:%.*]] = bitcast i32* [[DOTCE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST2]])
-; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(i32* [[TMP0]], i32* [[E_LOC]], i32* [[D_LOC]], i32* [[DOTCE_LOC]], i32 0)
-; CHECK-NEXT: [[E_RELOAD:%.*]] = load i32, i32* [[E_LOC]], align 4
-; CHECK-NEXT: [[D_RELOAD:%.*]] = load i32, i32* [[D_LOC]], align 4
-; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, i32* [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST2]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[D_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[E_LOC]], ptr [[D_LOC]], ptr [[DOTCE_LOC]], i32 0)
+; CHECK-NEXT: [[E_RELOAD:%.*]] = load i32, ptr [[E_LOC]], align 4
+; CHECK-NEXT: [[D_RELOAD:%.*]] = load i32, ptr [[D_LOC]], align 4
+; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[D_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[DOTCE_RELOAD]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[D_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[E_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[E_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[D_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(i32* [[TMP0]], i32* [[E_LOC]], i32* [[D_LOC]], i32* null, i32 1)
-; CHECK-NEXT: [[E_RELOAD:%.*]] = load i32, i32* [[E_LOC]], align 4
-; CHECK-NEXT: [[D_RELOAD:%.*]] = load i32, i32* [[D_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[D_LOC]])
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[E_LOC]], ptr [[D_LOC]], ptr null, i32 1)
+; CHECK-NEXT: [[E_RELOAD:%.*]] = load i32, ptr [[E_LOC]], align 4
+; CHECK-NEXT: [[D_RELOAD:%.*]] = load i32, ptr [[D_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[D_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
-; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[TMP0:%.*]], align 4
+; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0:%.*]], align 4
; CHECK-NEXT: br label [[TEST1:%.*]]
; CHECK: test1:
-; CHECK-NEXT: [[E:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[E:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: br i1 true, label [[FIRST_SPLIT:%.*]], label [[TEST:%.*]]
; CHECK: test:
-; CHECK-NEXT: [[D:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[D:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: br i1 true, label [[FIRST_SPLIT]], label [[NEXT_EXITSTUB:%.*]]
; CHECK: first.split:
; CHECK-NEXT: [[DOTCE:%.*]] = phi i32 [ [[C]], [[TEST]] ], [ [[E]], [[TEST1]] ]
; CHECK-NEXT: i32 1, label [[OUTPUT_BLOCK_1_0:%.*]]
; CHECK-NEXT: ]
; CHECK: output_block_0_0:
-; CHECK-NEXT: store i32 [[E]], i32* [[TMP1:%.*]], align 4
-; CHECK-NEXT: store i32 [[D]], i32* [[TMP2:%.*]], align 4
+; CHECK-NEXT: store i32 [[E]], ptr [[TMP1:%.*]], align 4
+; CHECK-NEXT: store i32 [[D]], ptr [[TMP2:%.*]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_0]]
; CHECK: output_block_0_1:
-; CHECK-NEXT: store i32 [[E]], i32* [[TMP1]], align 4
-; CHECK-NEXT: store i32 [[DOTCE]], i32* [[TMP3:%.*]], align 4
+; CHECK-NEXT: store i32 [[E]], ptr [[TMP1]], align 4
+; CHECK-NEXT: store i32 [[DOTCE]], ptr [[TMP3:%.*]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_1]]
; CHECK: output_block_1_0:
-; CHECK-NEXT: store i32 [[E]], i32* [[TMP1]], align 4
-; CHECK-NEXT: store i32 [[D]], i32* [[TMP2]], align 4
+; CHECK-NEXT: store i32 [[E]], ptr [[TMP1]], align 4
+; CHECK-NEXT: store i32 [[D]], ptr [[TMP2]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_0]]
; CHECK: output_block_1_1:
-; CHECK-NEXT: store i32 [[E]], i32* [[TMP1]], align 4
+; CHECK-NEXT: store i32 [[E]], ptr [[TMP1]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_1]]
; CHECK: final_block_0:
; CHECK-NEXT: ret i1 false
; Show that we do not outline when all of the phi nodes in the beginning
; block are not included in the region.
-define void @function1(i32* %a, i32* %b) {
+define void @function1(ptr %a, ptr %b) {
entry:
%0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
%y = add i32 %c, %c
br label %test1
dummy:
test1:
%1 = phi i32 [ %e, %test1 ], [ %y, %entry ]
%2 = phi i32 [ %e, %test1 ], [ %y, %entry ]
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
%3 = add i32 %c, %c
br i1 true, label %test, label %test1
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
br label %first
first:
ret void
}
-define void @function2(i32* %a, i32* %b) {
+define void @function2(ptr %a, ptr %b) {
entry:
%0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
%y = mul i32 %c, %c
br label %test1
dummy:
test1:
%1 = phi i32 [ %e, %test1 ], [ %y, %entry ]
%2 = phi i32 [ %y, %entry ], [ %e, %test1 ]
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
%3 = add i32 %c, %c
br i1 true, label %test, label %test1
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
br label %first
first:
ret void
; CHECK-NEXT: entry:
; CHECK-NEXT: [[E_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: [[Y:%.*]] = add i32 [[C]], [[C]]
; CHECK-NEXT: br label [[TEST1:%.*]]
; CHECK: dummy:
; CHECK: test1:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[E_RELOAD:%.*]], [[TEST1]] ], [ [[Y]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[E_RELOAD]], [[TEST1]] ], [ [[Y]], [[ENTRY]] ]
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[E_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(i32* [[TMP0]], i32 [[C]], i32* [[E_LOC]])
-; CHECK-NEXT: [[E_RELOAD]] = load i32, i32* [[E_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[E_LOC]])
+; CHECK-NEXT: [[E_RELOAD]] = load i32, ptr [[E_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[TEST1]], label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
; CHECK-NEXT: entry:
; CHECK-NEXT: [[E_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: [[Y:%.*]] = mul i32 [[C]], [[C]]
; CHECK-NEXT: br label [[TEST1:%.*]]
; CHECK: dummy:
; CHECK: test1:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[E_RELOAD:%.*]], [[TEST1]] ], [ [[Y]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[Y]], [[ENTRY]] ], [ [[E_RELOAD]], [[TEST1]] ]
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[E_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(i32* [[TMP0]], i32 [[C]], i32* [[E_LOC]])
-; CHECK-NEXT: [[E_RELOAD]] = load i32, i32* [[E_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[E_LOC]])
+; CHECK-NEXT: [[E_RELOAD]] = load i32, ptr [[E_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[TEST1]], label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[TEST1_TO_OUTLINE:%.*]]
; CHECK: test1_to_outline:
-; CHECK-NEXT: [[E:%.*]] = load i32, i32* [[TMP0:%.*]], align 4
+; CHECK-NEXT: [[E:%.*]] = load i32, ptr [[TMP0:%.*]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP1:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 true, label [[TEST:%.*]], label [[TEST1_EXITSTUB:%.*]]
; CHECK: test:
-; CHECK-NEXT: [[D:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[D:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: br label [[FIRST_EXITSTUB:%.*]]
; CHECK: test1.exitStub:
-; CHECK-NEXT: store i32 [[E]], i32* [[TMP2:%.*]], align 4
+; CHECK-NEXT: store i32 [[E]], ptr [[TMP2:%.*]], align 4
; CHECK-NEXT: ret i1 true
; CHECK: first.exitStub:
-; CHECK-NEXT: store i32 [[E]], i32* [[TMP2]], align 4
+; CHECK-NEXT: store i32 [[E]], ptr [[TMP2]], align 4
; CHECK-NEXT: ret i1 false
;
; Show that we do not outline when all of the phi nodes in the end
; block are not included in the region.
-define void @function1(i32* %a, i32* %b) {
+define void @function1(ptr %a, ptr %b) {
entry:
%0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
%z = add i32 %c, %c
br i1 true, label %test1, label %first
test1:
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
%1 = add i32 %c, %c
br i1 true, label %first, label %test
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
br i1 true, label %first, label %next
first:
%2 = phi i32 [ %d, %test ], [ %e, %test1 ], [ %c, %entry ]
ret void
}
-define void @function2(i32* %a, i32* %b) {
+define void @function2(ptr %a, ptr %b) {
entry:
%0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
%z = mul i32 %c, %c
br i1 true, label %test1, label %first
test1:
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
%1 = add i32 %c, %c
br i1 true, label %first, label %test
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
br i1 true, label %first, label %next
first:
%2 = phi i32 [ %d, %test ], [ %e, %test1 ], [ %c, %entry ]
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: [[Z:%.*]] = add i32 [[C]], [[C]]
; CHECK-NEXT: br i1 true, label [[TEST1:%.*]], label [[FIRST:%.*]]
; CHECK: test1:
-; CHECK-NEXT: [[E:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[E:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[C]], [[C]]
; CHECK-NEXT: br i1 true, label [[FIRST]], label [[TEST:%.*]]
; CHECK: test:
-; CHECK-NEXT: [[D:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[D:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: br i1 true, label [[FIRST]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[D]], [[TEST]] ], [ [[E]], [[TEST1]] ], [ [[C]], [[ENTRY:%.*]] ]
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: [[Z:%.*]] = mul i32 [[C]], [[C]]
; CHECK-NEXT: br i1 true, label [[TEST1:%.*]], label [[FIRST:%.*]]
; CHECK: test1:
-; CHECK-NEXT: [[E:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[E:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[C]], [[C]]
; CHECK-NEXT: br i1 true, label [[FIRST]], label [[TEST:%.*]]
; CHECK: test:
-; CHECK-NEXT: [[D:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[D:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: br i1 true, label [[FIRST]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[D]], [[TEST]] ], [ [[E]], [[TEST1]] ], [ [[C]], [[ENTRY:%.*]] ]
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 3, i32* %a, align 4
- store i32 4, i32* %b, align 4
- store i32 5, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 3, ptr %a, align 4
+ store i32 4, ptr %b, align 4
+ store i32 5, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 3, i32* [[A]], align 4
-; CHECK-NEXT: store i32 4, i32* [[B]], align 4
-; CHECK-NEXT: store i32 5, i32* [[C]], align 4
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[B]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[C]], align 4
+; CHECK-NEXT: store i32 3, ptr [[A]], align 4
+; CHECK-NEXT: store i32 4, ptr [[B]], align 4
+; CHECK-NEXT: store i32 5, ptr [[C]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[B]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[C]], align 4
; CHECK-NEXT: ret void
;
;
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 2, i32* [[A]], align 4
-; CHECK-NEXT: store i32 3, i32* [[B]], align 4
-; CHECK-NEXT: store i32 4, i32* [[C]], align 4
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[B]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[C]], align 4
+; CHECK-NEXT: store i32 2, ptr [[A]], align 4
+; CHECK-NEXT: store i32 3, ptr [[B]], align 4
+; CHECK-NEXT: store i32 4, ptr [[C]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[B]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[C]], align 4
; CHECK-NEXT: ret void
;
%b = alloca i32, align 4
%output = alloca i32, align 4
%result = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- %0 = load i32, i32* %a, align 4
- %1 = load i32, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ %0 = load i32, ptr %a, align 4
+ %1 = load i32, ptr %b, align 4
%add = add i32 %0, %1
- store i32 %add, i32* %output, align 4
- %2 = load i32, i32* %output, align 4
- %3 = load i32, i32* %output, align 4
+ store i32 %add, ptr %output, align 4
+ %2 = load i32, ptr %output, align 4
+ %3 = load i32, ptr %output, align 4
%mul = mul i32 %2, %add
- store i32 %mul, i32* %result, align 4
+ store i32 %mul, ptr %result, align 4
ret void
}
%b = alloca i32, align 4
%output = alloca i32, align 4
%result = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- %0 = load i32, i32* %a, align 4
- %1 = load i32, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ %0 = load i32, ptr %a, align 4
+ %1 = load i32, ptr %b, align 4
%add = add i32 %0, %1
- store i32 %add, i32* %output, align 4
- %2 = load i32, i32* %output, align 4
- %3 = load i32, i32* %output, align 4
+ store i32 %add, ptr %output, align 4
+ %2 = load i32, ptr %output, align 4
+ %3 = load i32, ptr %output, align 4
%mul = mul i32 %2, %add
- store i32 %mul, i32* %result, align 4
+ store i32 %mul, ptr %result, align 4
ret void
}
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
- %0 = load i32, i32* %a, align 4
- %1 = load i32, i32* %b, align 4
+ %0 = load i32, ptr %a, align 4
+ %1 = load i32, ptr %b, align 4
%add = add i32 %0, %1
%mul = mul i32 %0, %1
%sub = sub i32 %0, %1
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
- %0 = load i32, i32* %a, align 4
- %1 = load i32, i32* %b, align 4
+ %0 = load i32, ptr %a, align 4
+ %1 = load i32, ptr %b, align 4
%add = add i32 %0, %1
%mul = mul i32 %0, %1
%sub = sub i32 %0, %1
; This test checks that we successfully outline identical memcpy instructions.
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
-define i8 @function1(i8* noalias %s, i8* noalias %d, i64 %len) {
+define i8 @function1(ptr noalias %s, ptr noalias %d, i64 %len) {
entry:
- %a = load i8, i8* %s
- %b = load i8, i8* %d
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %len, i1 false)
+ %a = load i8, ptr %s
+ %b = load i8, ptr %d
+ call void @llvm.memcpy.p0.p0.i64(ptr %d, ptr %s, i64 %len, i1 false)
%c = add i8 %a, %b
- %ret = load i8, i8* %s
+ %ret = load i8, ptr %s
ret i8 %ret
}
-define i8 @function2(i8* noalias %s, i8* noalias %d, i64 %len) {
+define i8 @function2(ptr noalias %s, ptr noalias %d, i64 %len) {
entry:
- %a = load i8, i8* %s
- %b = load i8, i8* %d
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %len, i1 false)
+ %a = load i8, ptr %s
+ %b = load i8, ptr %d
+ call void @llvm.memcpy.p0.p0.i64(ptr %d, ptr %s, i64 %len, i1 false)
%c = add i8 %a, %b
- %ret = load i8, i8* %s
+ %ret = load i8, ptr %s
ret i8 %ret
}
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[RET_LOC]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i8* [[S:%.*]], i8* [[D:%.*]], i64 [[LEN:%.*]], i8* [[RET_LOC]])
-; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, i8* [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[S:%.*]], ptr [[D:%.*]], i64 [[LEN:%.*]], ptr [[RET_LOC]])
+; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[RET_LOC]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i8* [[S:%.*]], i8* [[D:%.*]], i64 [[LEN:%.*]], i8* [[RET_LOC]])
-; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, i8* [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[S:%.*]], ptr [[D:%.*]], i64 [[LEN:%.*]], ptr [[RET_LOC]])
+; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
-; CHECK-NEXT: [[A:%.*]] = load i8, i8* [[TMP0:%.*]], align 1
-; CHECK-NEXT: [[B:%.*]] = load i8, i8* [[TMP1:%.*]], align 1
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP1]], i8* [[TMP0]], i64 [[TMP2:%.*]], i1 false)
+; CHECK-NEXT: [[A:%.*]] = load i8, ptr [[TMP0:%.*]], align 1
+; CHECK-NEXT: [[B:%.*]] = load i8, ptr [[TMP1:%.*]], align 1
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[TMP1]], ptr [[TMP0]], i64 [[TMP2:%.*]], i1 false)
; CHECK-NEXT: [[C:%.*]] = add i8 [[A]], [[B]]
-; CHECK-NEXT: [[RET:%.*]] = load i8, i8* [[TMP0]], align 1
+; CHECK-NEXT: [[RET:%.*]] = load i8, ptr [[TMP0]], align 1
; CHECK-NEXT: br label [[ENTRY_AFTER_OUTLINE_EXITSTUB:%.*]]
; CHECK: entry_after_outline.exitStub:
-; CHECK-NEXT: store i8 [[RET]], i8* [[TMP3:%.*]], align 1
+; CHECK-NEXT: store i8 [[RET]], ptr [[TMP3:%.*]], align 1
; CHECK-NEXT: ret void
;
; This test checks that we successfully outline identical memmove instructions.
-declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1)
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
-define i8 @function1(i8* noalias %s, i8* noalias %d, i64 %len) {
+define i8 @function1(ptr noalias %s, ptr noalias %d, i64 %len) {
entry:
- %a = load i8, i8* %s
- %b = load i8, i8* %d
- call void @llvm.memmove.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %len, i1 false)
+ %a = load i8, ptr %s
+ %b = load i8, ptr %d
+ call void @llvm.memmove.p0.p0.i64(ptr %d, ptr %s, i64 %len, i1 false)
%c = add i8 %a, %b
- %ret = load i8, i8* %s
+ %ret = load i8, ptr %s
ret i8 %ret
}
-define i8 @function2(i8* noalias %s, i8* noalias %d, i64 %len) {
+define i8 @function2(ptr noalias %s, ptr noalias %d, i64 %len) {
entry:
- %a = load i8, i8* %s
- %b = load i8, i8* %d
- call void @llvm.memmove.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %len, i1 false)
+ %a = load i8, ptr %s
+ %b = load i8, ptr %d
+ call void @llvm.memmove.p0.p0.i64(ptr %d, ptr %s, i64 %len, i1 false)
%c = add i8 %a, %b
- %ret = load i8, i8* %s
+ %ret = load i8, ptr %s
ret i8 %ret
}
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[RET_LOC]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i8* [[S:%.*]], i8* [[D:%.*]], i64 [[LEN:%.*]], i8* [[RET_LOC]])
-; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, i8* [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[S:%.*]], ptr [[D:%.*]], i64 [[LEN:%.*]], ptr [[RET_LOC]])
+; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[RET_LOC]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i8* [[S:%.*]], i8* [[D:%.*]], i64 [[LEN:%.*]], i8* [[RET_LOC]])
-; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, i8* [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[S:%.*]], ptr [[D:%.*]], i64 [[LEN:%.*]], ptr [[RET_LOC]])
+; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
-; CHECK-NEXT: [[A:%.*]] = load i8, i8* [[TMP0:%.*]], align 1
-; CHECK-NEXT: [[B:%.*]] = load i8, i8* [[TMP1:%.*]], align 1
-; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* [[TMP1]], i8* [[TMP0]], i64 [[TMP2:%.*]], i1 false)
+; CHECK-NEXT: [[A:%.*]] = load i8, ptr [[TMP0:%.*]], align 1
+; CHECK-NEXT: [[B:%.*]] = load i8, ptr [[TMP1:%.*]], align 1
+; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr [[TMP1]], ptr [[TMP0]], i64 [[TMP2:%.*]], i1 false)
; CHECK-NEXT: [[C:%.*]] = add i8 [[A]], [[B]]
-; CHECK-NEXT: [[RET:%.*]] = load i8, i8* [[TMP0]], align 1
+; CHECK-NEXT: [[RET:%.*]] = load i8, ptr [[TMP0]], align 1
; CHECK-NEXT: br label [[ENTRY_AFTER_OUTLINE_EXITSTUB:%.*]]
; CHECK: entry_after_outline.exitStub:
-; CHECK-NEXT: store i8 [[RET]], i8* [[TMP3:%.*]], align 1
+; CHECK-NEXT: store i8 [[RET]], ptr [[TMP3:%.*]], align 1
; CHECK-NEXT: ret void
;
; This test checks that we successfully outline identical memset instructions.
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i32, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i32, i1)
define i64 @function1(i64 %x, i64 %z, i64 %n) {
entry:
%pool = alloca [59 x i64], align 4
- %tmp = bitcast [59 x i64]* %pool to i8*
- call void @llvm.memset.p0i8.i64(i8* nonnull %tmp, i8 0, i64 236, i32 4, i1 false)
+ call void @llvm.memset.p0.i64(ptr nonnull %pool, i8 0, i64 236, i32 4, i1 false)
%cmp3 = icmp eq i64 %n, 0
%a = add i64 %x, %z
%c = add i64 %x, %z
define i64 @function2(i64 %x, i64 %z, i64 %n) {
entry:
%pool = alloca [59 x i64], align 4
- %tmp = bitcast [59 x i64]* %pool to i8*
- call void @llvm.memset.p0i8.i64(i8* nonnull %tmp, i8 0, i64 236, i32 4, i1 false)
+ call void @llvm.memset.p0.i64(ptr nonnull %pool, i8 0, i64 236, i32 4, i1 false)
%cmp3 = icmp eq i64 %n, 0
%a = add i64 %x, %z
%c = add i64 %x, %z
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[POOL:%.*]] = alloca [59 x i64], align 4
-; CHECK-NEXT: call void @outlined_ir_func_0([59 x i64]* [[POOL]], i64 [[N:%.*]], i64 [[X:%.*]], i64 [[Z:%.*]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[POOL]], i64 [[N:%.*]], i64 [[X:%.*]], i64 [[Z:%.*]])
; CHECK-NEXT: ret i64 0
;
;
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[POOL:%.*]] = alloca [59 x i64], align 4
-; CHECK-NEXT: call void @outlined_ir_func_0([59 x i64]* [[POOL]], i64 [[N:%.*]], i64 [[X:%.*]], i64 [[Z:%.*]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[POOL]], i64 [[N:%.*]], i64 [[X:%.*]], i64 [[Z:%.*]])
; CHECK-NEXT: ret i64 0
;
;
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
-; CHECK-NEXT: [[TMP:%.*]] = bitcast [59 x i64]* [[TMP0:%.*]] to i8*
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* nonnull align 4 [[TMP]], i8 0, i64 236, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr nonnull align 4 [[TMP0:%.*]], i8 0, i64 236, i1 false)
; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i64 [[TMP1:%.*]], 0
; CHECK-NEXT: [[A:%.*]] = add i64 [[TMP2:%.*]], [[TMP3:%.*]]
; CHECK-NEXT: [[C:%.*]] = add i64 [[TMP2]], [[TMP3]]
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --include-generated-funcs
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
; RUN: opt -S -passes=verify,iroutliner -ir-outlining-no-cost < %s | FileCheck %s
; This test checks that we successfully outline identical memcpy var arg
; intrinsics, but not the var arg instruction itself.
-declare void @llvm.va_start(i8*)
-declare void @llvm.va_copy(i8*, i8*)
-declare void @llvm.va_end(i8*)
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_copy(ptr, ptr)
+declare void @llvm.va_end(ptr)
-define i32 @func1(i32 %a, double %b, i8* %v, ...) nounwind {
+define i32 @func1(i32 %a, double %b, ptr %v, ...) nounwind {
entry:
%a.addr = alloca i32, align 4
%b.addr = alloca double, align 8
- %ap = alloca i8*, align 4
+ %ap = alloca ptr, align 4
%c = alloca i32, align 4
- store i32 %a, i32* %a.addr, align 4
- store double %b, double* %b.addr, align 8
- %ap1 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap1)
- %0 = va_arg i8** %ap, i32
- call void @llvm.va_copy(i8* %v, i8* %ap1)
- call void @llvm.va_end(i8* %ap1)
- store i32 %0, i32* %c, align 4
- %tmp = load i32, i32* %c, align 4
+ store i32 %a, ptr %a.addr, align 4
+ store double %b, ptr %b.addr, align 8
+ call void @llvm.va_start(ptr %ap)
+ %0 = va_arg ptr %ap, i32
+ call void @llvm.va_copy(ptr %v, ptr %ap)
+ call void @llvm.va_end(ptr %ap)
+ store i32 %0, ptr %c, align 4
+ %tmp = load i32, ptr %c, align 4
ret i32 %tmp
}
-define i32 @func2(i32 %a, double %b, i8* %v, ...) nounwind {
+define i32 @func2(i32 %a, double %b, ptr %v, ...) nounwind {
entry:
%a.addr = alloca i32, align 4
%b.addr = alloca double, align 8
- %ap = alloca i8*, align 4
+ %ap = alloca ptr, align 4
%c = alloca i32, align 4
- store i32 %a, i32* %a.addr, align 4
- store double %b, double* %b.addr, align 8
- %ap1 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap1)
- %0 = va_arg i8** %ap, i32
- call void @llvm.va_copy(i8* %v, i8* %ap1)
- call void @llvm.va_end(i8* %ap1)
- store i32 %0, i32* %c, align 4
- %ap2 = bitcast i8** %ap to i8*
- %tmp = load i32, i32* %c, align 4
+ store i32 %a, ptr %a.addr, align 4
+ store double %b, ptr %b.addr, align 8
+ call void @llvm.va_start(ptr %ap)
+ %0 = va_arg ptr %ap, i32
+ call void @llvm.va_copy(ptr %v, ptr %ap)
+ call void @llvm.va_end(ptr %ap)
+ store i32 %0, ptr %c, align 4
+ %tmp = load i32, ptr %c, align 4
ret i32 %tmp
}
-; CHECK-LABEL: @func1(
+; CHECK-LABEL: define {{[^@]+}}@func1
+; CHECK-SAME: (i32 [[A:%.*]], double [[B:%.*]], ptr [[V:%.*]], ...) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B_ADDR:%.*]] = alloca double, align 8
-; CHECK-NEXT: [[AP:%.*]] = alloca i8*, align 4
+; CHECK-NEXT: [[AP:%.*]] = alloca ptr, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
-; CHECK-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8
-; CHECK-NEXT: [[AP1:%.*]] = bitcast i8** [[AP]] to i8*
-; CHECK-NEXT: call void @llvm.va_start(i8* [[AP1]])
-; CHECK-NEXT: [[TMP0:%.*]] = va_arg i8** [[AP]], i32
-; CHECK-NEXT: call void @outlined_ir_func_0(i8* [[V:%.*]], i8* [[AP1]], i32 [[TMP0]], i32* [[C]])
-; CHECK-NEXT: [[TMP:%.*]] = load i32, i32* [[C]], align 4
-; CHECK-NEXT: ret i32 [[TMP]]
+; CHECK-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
+; CHECK-NEXT: store double [[B]], ptr [[B_ADDR]], align 8
+; CHECK-NEXT: call void @llvm.va_start(ptr [[AP]])
+; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[V]], ptr [[AP]], i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
+; CHECK-NEXT: [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: ret i32 [[TMP_RELOAD]]
;
;
-; CHECK-LABEL: @func2(
+; CHECK-LABEL: define {{[^@]+}}@func2
+; CHECK-SAME: (i32 [[A:%.*]], double [[B:%.*]], ptr [[V:%.*]], ...) #[[ATTR1]] {
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B_ADDR:%.*]] = alloca double, align 8
-; CHECK-NEXT: [[AP:%.*]] = alloca i8*, align 4
+; CHECK-NEXT: [[AP:%.*]] = alloca ptr, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
-; CHECK-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8
-; CHECK-NEXT: [[AP1:%.*]] = bitcast i8** [[AP]] to i8*
-; CHECK-NEXT: call void @llvm.va_start(i8* [[AP1]])
-; CHECK-NEXT: [[TMP0:%.*]] = va_arg i8** [[AP]], i32
-; CHECK-NEXT: call void @outlined_ir_func_0(i8* [[V:%.*]], i8* [[AP1]], i32 [[TMP0]], i32* [[C]])
-; CHECK-NEXT: [[AP2:%.*]] = bitcast i8** [[AP]] to i8*
-; CHECK-NEXT: [[TMP:%.*]] = load i32, i32* [[C]], align 4
-; CHECK-NEXT: ret i32 [[TMP]]
+; CHECK-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
+; CHECK-NEXT: store double [[B]], ptr [[B_ADDR]], align 8
+; CHECK-NEXT: call void @llvm.va_start(ptr [[AP]])
+; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[V]], ptr [[AP]], i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
+; CHECK-NEXT: [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: ret i32 [[TMP_RELOAD]]
;
;
-; CHECK: define internal void @outlined_ir_func_0(
+; CHECK-LABEL: define {{[^@]+}}@outlined_ir_func_0
+; CHECK-SAME: (ptr [[TMP0:%.*]], ptr [[TMP1:%.*]], i32 [[TMP2:%.*]], ptr [[TMP3:%.*]], ptr [[TMP4:%.*]]) #[[ATTR3:[0-9]+]] {
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
-; CHECK-NEXT: call void @llvm.va_copy(i8* [[TMP0:%.*]], i8* [[TMP1:%.*]])
-; CHECK-NEXT: call void @llvm.va_end(i8* [[TMP1]])
-; CHECK-NEXT: store i32 [[TMP2:%.*]], i32* [[TMP3:%.*]], align 4
+; CHECK-NEXT: call void @llvm.va_copy(ptr [[TMP0]], ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.va_end(ptr [[TMP1]])
+; CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP3]], align 4
+; CHECK-NEXT: [[TMP:%.*]] = load i32, ptr [[TMP3]], align 4
; CHECK-NEXT: br label [[ENTRY_AFTER_OUTLINE_EXITSTUB:%.*]]
; CHECK: entry_after_outline.exitStub:
+; CHECK-NEXT: store i32 [[TMP]], ptr [[TMP4]], align 4
; CHECK-NEXT: ret void
;
%result = alloca i32, align 4
%output2 = alloca i32, align 4
%result2 = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
br label %next
next:
- store i32 2, i32* %output, align 4
- store i32 3, i32* %result, align 4
+ store i32 2, ptr %output, align 4
+ store i32 3, ptr %result, align 4
ret void
}
%result = alloca i32, align 4
%output2 = alloca i32, align 4
%result2 = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
br label %next
next:
- store i32 2, i32* %output, align 4
- store i32 3, i32* %result, align 4
+ store i32 2, ptr %output, align 4
+ store i32 3, ptr %result, align 4
ret void
}
; CHECK-LABEL: @outline_outputs1(
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT2:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT2:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[OUTPUT]], i32* [[RESULT]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]])
; CHECK-NEXT: ret void
;
;
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT2:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT2:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[OUTPUT]], i32* [[RESULT]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]])
; CHECK-NEXT: ret void
;
;
; CHECK: newFuncRoot:
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
-; CHECK-NEXT: store i32 2, i32* [[TMP0:%.*]], align 4
-; CHECK-NEXT: store i32 3, i32* [[TMP1:%.*]], align 4
+; CHECK-NEXT: store i32 2, ptr [[TMP0:%.*]], align 4
+; CHECK-NEXT: store i32 3, ptr [[TMP1:%.*]], align 4
; CHECK-NEXT: br label [[NEXT:%.*]]
; CHECK: next:
-; CHECK-NEXT: store i32 2, i32* [[TMP2:%.*]], align 4
-; CHECK-NEXT: store i32 3, i32* [[TMP3:%.*]], align 4
+; CHECK-NEXT: store i32 2, ptr [[TMP2:%.*]], align 4
+; CHECK-NEXT: store i32 3, ptr [[TMP3:%.*]], align 4
; CHECK-NEXT: br label [[ENTRY_AFTER_OUTLINE_EXITSTUB:%.*]]
; CHECK: entry_after_outline.exitStub:
; CHECK-NEXT: ret void
; This test shows that we do not outline from basic blocks with their address
; taken.
-@ba1 = constant i8* blockaddress (@dontoutline, %new_block)
+@ba1 = constant ptr blockaddress (@dontoutline, %new_block)
define void @outline_1() {
; CHECK-LABEL: @outline_1(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @[[FUNCTION_1:.*]](i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @[[FUNCTION_1:.*]](ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @[[FUNCTION_1]](i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @[[FUNCTION_1]](ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[NEW_BLOCK:%.*]]
; CHECK: new_block:
-; CHECK-NEXT: store i32 2, i32* [[A]], align 4
-; CHECK-NEXT: store i32 3, i32* [[B]], align 4
-; CHECK-NEXT: store i32 4, i32* [[C]], align 4
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[B]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[C]], align 4
+; CHECK-NEXT: store i32 2, ptr [[A]], align 4
+; CHECK-NEXT: store i32 3, ptr [[B]], align 4
+; CHECK-NEXT: store i32 4, ptr [[C]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[B]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[C]], align 4
; CHECK-NEXT: ret void
;
entry:
%c = alloca i32, align 4
br label %new_block
new_block:
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
-; CHECK: define internal void @[[FUNCTION_1]](i32* [[ARG0:%.*]], i32* [[ARG1:%.*]], i32* [[ARG2:%.*]])
+; CHECK: define internal void @[[FUNCTION_1]](ptr [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]])
; CHECK: entry_to_outline:
-; CHECK-NEXT: store i32 2, i32* [[ARG0]], align 4
-; CHECK-NEXT: store i32 3, i32* [[ARG1]], align 4
-; CHECK-NEXT: store i32 4, i32* [[ARG2]], align 4
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[ARG0]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[ARG1]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[ARG2]], align 4
+; CHECK-NEXT: store i32 2, ptr [[ARG0]], align 4
+; CHECK-NEXT: store i32 3, ptr [[ARG1]], align 4
+; CHECK-NEXT: store i32 4, ptr [[ARG2]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[ARG0]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[ARG1]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[ARG2]], align 4
; Additionally, we check that the newly added bitcast instruction is excluded in
; further extractions.
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
define void @outline_bitcast_base() {
entry:
%b = alloca i32, align 4
%c = alloca i32, align 4
%d = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %X = bitcast i32* %d to i8*
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
%b = alloca i32, align 4
%c = alloca i32, align 4
%d = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %X = bitcast i32* %d to i8*
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
- call void @llvm.lifetime.start.p0i8(i64 -1, i8* %X)
- %am = load i32, i32* %b
- %bm = load i32, i32* %a
- %cm = load i32, i32* %c
- call void @llvm.lifetime.end.p0i8(i64 -1, i8* %X)
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
+ call void @llvm.lifetime.start.p0(i64 -1, ptr %d)
+ %am = load i32, ptr %b
+ %bm = load i32, ptr %a
+ %cm = load i32, ptr %c
+ call void @llvm.lifetime.end.p0(i64 -1, ptr %d)
ret void
}
define void @outline_bitcast_base2(i32 %a, i32 %b, i32 %c) {
entry:
%d = alloca i32, align 4
- %X = bitcast i32* %d to i8*
%al = add i32 %a, %b
%bl = add i32 %b, %a
%cl = add i32 %b, %c
%buffer = mul i32 %a, %b
- %Y = bitcast i32* %d to i8*
%am = add i32 %a, %b
%bm = add i32 %b, %a
%cm = add i32 %b, %c
- call void @llvm.lifetime.start.p0i8(i64 -1, i8* %X)
- call void @llvm.lifetime.end.p0i8(i64 -1, i8* %X)
+ call void @llvm.lifetime.start.p0(i64 -1, ptr %d)
+ call void @llvm.lifetime.end.p0(i64 -1, ptr %d)
ret void
}
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[D:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]], i32* [[D]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
;
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[D:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]], i32* [[D]])
-; CHECK-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[D]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: [[AM:%.*]] = load i32, i32* [[B]], align 4
-; CHECK-NEXT: [[BM:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT: [[CM:%.*]] = load i32, i32* [[C]], align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[D]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
+; CHECK-NEXT: [[AM:%.*]] = load i32, ptr [[B]], align 4
+; CHECK-NEXT: [[BM:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: [[CM:%.*]] = load i32, ptr [[C]], align 4
; CHECK-NEXT: ret void
;
;
; CHECK-LABEL: @outline_bitcast_base2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[D:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[X:%.*]] = bitcast i32* [[D]] to i8*
-; CHECK-NEXT: [[AL:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[BL:%.*]] = add i32 [[B]], [[A]]
-; CHECK-NEXT: [[CL:%.*]] = add i32 [[B]], [[C:%.*]]
+; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]])
; CHECK-NEXT: [[BUFFER:%.*]] = mul i32 [[A]], [[B]]
-; CHECK-NEXT: [[Y:%.*]] = bitcast i32* [[D]] to i8*
-; CHECK-NEXT: [[AM:%.*]] = add i32 [[A]], [[B]]
-; CHECK-NEXT: [[BM:%.*]] = add i32 [[B]], [[A]]
-; CHECK-NEXT: [[CM:%.*]] = add i32 [[B]], [[C]]
-; CHECK-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[D]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[D]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[A]], i32 [[B]], i32 [[C]])
; CHECK-NEXT: ret void
;
;
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
-; CHECK-NEXT: store i32 2, i32* [[TMP0:%.*]], align 4
-; CHECK-NEXT: store i32 3, i32* [[TMP1:%.*]], align 4
-; CHECK-NEXT: store i32 4, i32* [[TMP2:%.*]], align 4
-; CHECK-NEXT: [[X:%.*]] = bitcast i32* [[TMP3:%.*]] to i8*
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[TMP0]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[TMP1]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[TMP2]], align 4
+; CHECK-NEXT: store i32 2, ptr [[TMP0:%.*]], align 4
+; CHECK-NEXT: store i32 3, ptr [[TMP1:%.*]], align 4
+; CHECK-NEXT: store i32 4, ptr [[TMP2:%.*]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[TMP2]], align 4
+; CHECK-NEXT: br label [[ENTRY_AFTER_OUTLINE_EXITSTUB:%.*]]
+; CHECK: entry_after_outline.exitStub:
+; CHECK-NEXT: ret void
+;
+;
+; CHECK-LABEL: @outlined_ir_func_1(
+; CHECK-NEXT: newFuncRoot:
+; CHECK-NEXT: [[D:%.*]] = alloca i32, align 4
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[D]])
+; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
+; CHECK: entry_to_outline:
+; CHECK-NEXT: [[AL:%.*]] = add i32 [[TMP0:%.*]], [[TMP1:%.*]]
+; CHECK-NEXT: [[BL:%.*]] = add i32 [[TMP1]], [[TMP0]]
+; CHECK-NEXT: [[CL:%.*]] = add i32 [[TMP1]], [[TMP2:%.*]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[D]])
; CHECK-NEXT: br label [[ENTRY_AFTER_OUTLINE_EXITSTUB:%.*]]
; CHECK: entry_after_outline.exitStub:
; CHECK-NEXT: ret void
+;
%b2 = alloca i32, align 4
br label %block_2
block_2:
- %a2val = load i32, i32* %a
- %b2val = load i32, i32* %b
+ %a2val = load i32, ptr %a
+ %b2val = load i32, ptr %b
%add2 = add i32 2, %a2val
%mul2 = mul i32 2, %b2val
br label %block_5
block_3:
- %aval = load i32, i32* %a
- %bval = load i32, i32* %b
+ %aval = load i32, ptr %a
+ %bval = load i32, ptr %b
%add = add i32 2, %aval
%mul = mul i32 2, %bval
br label %block_4
block_4:
- store i32 %add, i32* %output, align 4
- store i32 %mul, i32* %result, align 4
+ store i32 %add, ptr %output, align 4
+ store i32 %mul, ptr %result, align 4
br label %block_6
block_5:
- store i32 %add2, i32* %output, align 4
- store i32 %mul2, i32* %result, align 4
+ store i32 %add2, ptr %output, align 4
+ store i32 %mul2, ptr %result, align 4
br label %block_6
dummy:
ret void
%b2 = alloca i32, align 4
br label %block_2
block_2:
- %a2val = load i32, i32* %a
- %b2val = load i32, i32* %b
+ %a2val = load i32, ptr %a
+ %b2val = load i32, ptr %b
%add2 = add i32 2, %a2val
%mul2 = mul i32 2, %b2val
br label %block_5
block_3:
- %aval = load i32, i32* %a
- %bval = load i32, i32* %b
+ %aval = load i32, ptr %a
+ %bval = load i32, ptr %b
%add = add i32 2, %aval
%mul = mul i32 2, %bval
br label %block_4
block_4:
- store i32 %add, i32* %output, align 4
- store i32 %mul, i32* %result, align 4
+ store i32 %add, ptr %output, align 4
+ store i32 %mul, ptr %result, align 4
br label %block_6
block_5:
- store i32 %add2, i32* %output, align 4
- store i32 %mul2, i32* %result, align 4
+ store i32 %add2, ptr %output, align 4
+ store i32 %mul2, ptr %result, align 4
br label %block_6
dummy:
ret void
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[DIFF_CE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[OUTPUT]], i32* [[RESULT]], i32* [[DIFF_CE_LOC]])
-; CHECK-NEXT: [[DIFF_CE_RELOAD:%.*]] = load i32, i32* [[DIFF_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DIFF_CE_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[DIFF_CE_LOC]])
+; CHECK-NEXT: [[DIFF_CE_RELOAD:%.*]] = load i32, ptr [[DIFF_CE_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DIFF_CE_LOC]])
; CHECK-NEXT: br label [[BLOCK_6:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[DIFF_CE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[OUTPUT]], i32* [[RESULT]], i32* [[DIFF_CE_LOC]])
-; CHECK-NEXT: [[DIFF_CE_RELOAD:%.*]] = load i32, i32* [[DIFF_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DIFF_CE_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[DIFF_CE_LOC]])
+; CHECK-NEXT: [[DIFF_CE_RELOAD:%.*]] = load i32, ptr [[DIFF_CE_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DIFF_CE_LOC]])
; CHECK-NEXT: br label [[BLOCK_6:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[BLOCK_2_TO_OUTLINE:%.*]]
; CHECK: block_2_to_outline:
-; CHECK-NEXT: [[A2VAL:%.*]] = load i32, i32* [[TMP0:%.*]], align 4
-; CHECK-NEXT: [[B2VAL:%.*]] = load i32, i32* [[TMP1:%.*]], align 4
+; CHECK-NEXT: [[A2VAL:%.*]] = load i32, ptr [[TMP0:%.*]], align 4
+; CHECK-NEXT: [[B2VAL:%.*]] = load i32, ptr [[TMP1:%.*]], align 4
; CHECK-NEXT: [[ADD2:%.*]] = add i32 2, [[A2VAL]]
; CHECK-NEXT: [[MUL2:%.*]] = mul i32 2, [[B2VAL]]
; CHECK-NEXT: br label [[BLOCK_5:%.*]]
; CHECK: block_3:
-; CHECK-NEXT: [[AVAL:%.*]] = load i32, i32* [[TMP0]], align 4
-; CHECK-NEXT: [[BVAL:%.*]] = load i32, i32* [[TMP1]], align 4
+; CHECK-NEXT: [[AVAL:%.*]] = load i32, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[BVAL:%.*]] = load i32, ptr [[TMP1]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 2, [[AVAL]]
; CHECK-NEXT: [[MUL:%.*]] = mul i32 2, [[BVAL]]
; CHECK-NEXT: br label [[BLOCK_4:%.*]]
; CHECK: block_4:
-; CHECK-NEXT: store i32 [[ADD]], i32* [[TMP2:%.*]], align 4
-; CHECK-NEXT: store i32 [[MUL]], i32* [[TMP3:%.*]], align 4
+; CHECK-NEXT: store i32 [[ADD]], ptr [[TMP2:%.*]], align 4
+; CHECK-NEXT: store i32 [[MUL]], ptr [[TMP3:%.*]], align 4
; CHECK-NEXT: br label [[BLOCK_6_SPLIT:%.*]]
; CHECK: block_5:
-; CHECK-NEXT: store i32 [[ADD2]], i32* [[TMP2]], align 4
-; CHECK-NEXT: store i32 [[MUL2]], i32* [[TMP3]], align 4
+; CHECK-NEXT: store i32 [[ADD2]], ptr [[TMP2]], align 4
+; CHECK-NEXT: store i32 [[MUL2]], ptr [[TMP3]], align 4
; CHECK-NEXT: br label [[BLOCK_6_SPLIT]]
; CHECK: block_6.split:
; CHECK-NEXT: [[DIFF_CE:%.*]] = phi i32 [ [[AVAL]], [[BLOCK_4]] ], [ [[A2VAL]], [[BLOCK_5]] ]
; CHECK-NEXT: br label [[BLOCK_6_EXITSTUB:%.*]]
; CHECK: block_6.exitStub:
-; CHECK-NEXT: store i32 [[DIFF_CE]], i32* [[TMP4:%.*]], align 4
+; CHECK-NEXT: store i32 [[DIFF_CE]], ptr [[TMP4:%.*]], align 4
; CHECK-NEXT: ret void
;
declare void @f1();
-define void @function1(void()* %func) {
+define void @function1(ptr %func) {
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
call void %func()
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
call void @f1()
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; CHECK-LABEL: @function1(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]], void ()* [[FUNC:%.*]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]], ptr [[FUNC:%.*]])
; CHECK-NEXT: ret void
;
;
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]], void ()* @f1)
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]], ptr @f1)
; CHECK-NEXT: ret void
;
;
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
-; CHECK-NEXT: store i32 2, i32* [[TMP0:%.*]], align 4
-; CHECK-NEXT: store i32 3, i32* [[TMP1:%.*]], align 4
-; CHECK-NEXT: store i32 4, i32* [[TMP2:%.*]], align 4
+; CHECK-NEXT: store i32 2, ptr [[TMP0:%.*]], align 4
+; CHECK-NEXT: store i32 3, ptr [[TMP1:%.*]], align 4
+; CHECK-NEXT: store i32 4, ptr [[TMP2:%.*]], align 4
; CHECK-NEXT: call void [[TMP3:%.*]]()
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[TMP0]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[TMP1]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[TMP2]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: br label [[ENTRY_AFTER_OUTLINE_EXITSTUB:%.*]]
; CHECK: entry_after_outline.exitStub:
; CHECK-NEXT: ret void
; This test checks that we can outline calls, but only if they have the same
; function type and the same name.
-declare void @f1(i32*, i32*);
-declare void @f2(i32*, i32*);
+declare void @f1(ptr, ptr);
+declare void @f2(ptr, ptr);
define void @function1() {
; CHECK-LABEL: @function1(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- call void @f1(i32* %a, i32* %b)
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ call void @f1(ptr %a, ptr %b)
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- call void @f1(i32* %a, i32* %b)
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ call void @f1(ptr %a, ptr %b)
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 2, i32* [[A]], align 4
-; CHECK-NEXT: store i32 3, i32* [[B]], align 4
-; CHECK-NEXT: store i32 4, i32* [[C]], align 4
-; CHECK-NEXT: call void @f2(i32* [[A]], i32* [[B]])
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[B]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[C]], align 4
+; CHECK-NEXT: store i32 2, ptr [[A]], align 4
+; CHECK-NEXT: store i32 3, ptr [[B]], align 4
+; CHECK-NEXT: store i32 4, ptr [[C]], align 4
+; CHECK-NEXT: call void @f2(ptr [[A]], ptr [[B]])
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[B]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[C]], align 4
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- call void @f2(i32* %a, i32* %b)
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ call void @f2(ptr %a, ptr %b)
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
-; CHECK: define internal void @outlined_ir_func_0(i32* [[ARG0:%.*]], i32* [[ARG1:%.*]], i32* [[ARG2:%.*]])
+; CHECK: define internal void @outlined_ir_func_0(ptr [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]])
; CHECK: entry_to_outline:
-; CHECK-NEXT: store i32 2, i32* [[ARG0]], align 4
-; CHECK-NEXT: store i32 3, i32* [[ARG1]], align 4
-; CHECK-NEXT: store i32 4, i32* [[ARG2]], align 4
-; CHECK-NEXT: call void @f1(i32* [[ARG0]], i32* [[ARG1]])
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[ARG0]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[ARG1]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[ARG2]], align 4
+; CHECK-NEXT: store i32 2, ptr [[ARG0]], align 4
+; CHECK-NEXT: store i32 3, ptr [[ARG1]], align 4
+; CHECK-NEXT: store i32 4, ptr [[ARG2]], align 4
+; CHECK-NEXT: call void @f1(ptr [[ARG0]], ptr [[ARG1]])
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[ARG0]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[ARG1]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[ARG2]], align 4
; This test checks that we can outline calls, but only if they have the same
; function type and the same name.
-declare void @f1(i32*, i32*);
-declare void @f2(i32*, i32*);
+declare void @f1(ptr, ptr);
+declare void @f2(ptr, ptr);
define void @function1() {
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- call void @f1(i32* %a, i32* %b)
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ call void @f1(ptr %a, ptr %b)
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- call void @f1(i32* %a, i32* %b)
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ call void @f1(ptr %a, ptr %b)
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- call void @f2(i32* %a, i32* %b)
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ call void @f2(ptr %a, ptr %b)
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]], void (i32*, i32*)* @f1)
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]], ptr @f1)
; CHECK-NEXT: ret void
;
;
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]], void (i32*, i32*)* @f1)
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]], ptr @f1)
; CHECK-NEXT: ret void
;
;
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]], void (i32*, i32*)* @f2)
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]], ptr @f2)
; CHECK-NEXT: ret void
;
;
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
-; CHECK-NEXT: store i32 2, i32* [[TMP0:%.*]], align 4
-; CHECK-NEXT: store i32 3, i32* [[TMP1:%.*]], align 4
-; CHECK-NEXT: store i32 4, i32* [[TMP2:%.*]], align 4
-; CHECK-NEXT: call void [[TMP3:%.*]](i32* [[TMP0]], i32* [[TMP1]])
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[TMP0]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[TMP1]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[TMP2]], align 4
+; CHECK-NEXT: store i32 2, ptr [[TMP0:%.*]], align 4
+; CHECK-NEXT: store i32 3, ptr [[TMP1:%.*]], align 4
+; CHECK-NEXT: store i32 4, ptr [[TMP2:%.*]], align 4
+; CHECK-NEXT: call void [[TMP3:%.*]](ptr [[TMP0]], ptr [[TMP1]])
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: br label [[ENTRY_AFTER_OUTLINE_EXITSTUB:%.*]]
; CHECK: entry_after_outline.exitStub:
; CHECK-NEXT: ret void
; CHECK-NEXT: [[A:%.*]] = alloca double, align 4
; CHECK-NEXT: [[B:%.*]] = alloca double, align 4
; CHECK-NEXT: [[C:%.*]] = alloca double, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(double* [[A]], double* [[B]], double* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca double, align 4
%b = alloca double, align 4
%c = alloca double, align 4
- store double 2.0, double* %a, align 4
- store double 3.0, double* %b, align 4
- store double 4.0, double* %c, align 4
- %al = load double, double* %a
- %bl = load double, double* %b
- %cl = load double, double* %c
+ store double 2.0, ptr %a, align 4
+ store double 3.0, ptr %b, align 4
+ store double 4.0, ptr %c, align 4
+ %al = load double, ptr %a
+ %bl = load double, ptr %b
+ %cl = load double, ptr %c
%0 = fadd double %al, %bl
%1 = fadd double %al, %cl
%2 = fadd double %bl, %cl
; CHECK-NEXT: [[A:%.*]] = alloca double, align 4
; CHECK-NEXT: [[B:%.*]] = alloca double, align 4
; CHECK-NEXT: [[C:%.*]] = alloca double, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(double* [[A]], double* [[B]], double* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca double, align 4
%b = alloca double, align 4
%c = alloca double, align 4
- store double 2.0, double* %a, align 4
- store double 3.0, double* %b, align 4
- store double 4.0, double* %c, align 4
- %al = load double, double* %a
- %bl = load double, double* %b
- %cl = load double, double* %c
+ store double 2.0, ptr %a, align 4
+ store double 3.0, ptr %b, align 4
+ store double 4.0, ptr %c, align 4
+ %al = load double, ptr %a
+ %bl = load double, ptr %b
+ %cl = load double, ptr %c
%0 = fadd double %al, %bl
%1 = fadd double %al, %cl
%2 = fadd double %bl, %cl
; CHECK-NEXT: [[A:%.*]] = alloca double, align 4
; CHECK-NEXT: [[B:%.*]] = alloca double, align 4
; CHECK-NEXT: [[C:%.*]] = alloca double, align 4
-; CHECK-NEXT: store double 2.000000e+00, double* [[A]], align 4
-; CHECK-NEXT: store double 3.000000e+00, double* [[B]], align 4
-; CHECK-NEXT: store double 4.000000e+00, double* [[C]], align 4
-; CHECK-NEXT: [[AL:%.*]] = load double, double* [[A]], align 8
-; CHECK-NEXT: [[BL:%.*]] = load double, double* [[B]], align 8
-; CHECK-NEXT: [[CL:%.*]] = load double, double* [[C]], align 8
+; CHECK-NEXT: store double 2.000000e+00, ptr [[A]], align 4
+; CHECK-NEXT: store double 3.000000e+00, ptr [[B]], align 4
+; CHECK-NEXT: store double 4.000000e+00, ptr [[C]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load double, ptr [[A]], align 8
+; CHECK-NEXT: [[BL:%.*]] = load double, ptr [[B]], align 8
+; CHECK-NEXT: [[CL:%.*]] = load double, ptr [[C]], align 8
; CHECK-NEXT: [[TMP0:%.*]] = fadd double [[BL]], [[AL]]
; CHECK-NEXT: [[TMP1:%.*]] = fadd double [[CL]], [[AL]]
; CHECK-NEXT: [[TMP2:%.*]] = fadd double [[CL]], [[BL]]
%a = alloca double, align 4
%b = alloca double, align 4
%c = alloca double, align 4
- store double 2.0, double* %a, align 4
- store double 3.0, double* %b, align 4
- store double 4.0, double* %c, align 4
- %al = load double, double* %a
- %bl = load double, double* %b
- %cl = load double, double* %c
+ store double 2.0, ptr %a, align 4
+ store double 3.0, ptr %b, align 4
+ store double 4.0, ptr %c, align 4
+ %al = load double, ptr %a
+ %bl = load double, ptr %b
+ %cl = load double, ptr %c
%0 = fadd double %bl, %al
%1 = fadd double %cl, %al
%2 = fadd double %cl, %bl
ret void
}
-; CHECK: define internal void @outlined_ir_func_0(double* [[ARG0:%.*]], double* [[ARG1:%.*]], double* [[ARG2:%.*]]) #0 {
+; CHECK: define internal void @outlined_ir_func_0(ptr [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]]) #0 {
; CHECK: entry_to_outline:
-; CHECK-NEXT: store double 2.000000e+00, double* [[ARG0]], align 4
-; CHECK-NEXT: store double 3.000000e+00, double* [[ARG1]], align 4
-; CHECK-NEXT: store double 4.000000e+00, double* [[ARG2]], align 4
-; CHECK-NEXT: [[AL:%.*]] = load double, double* [[ARG0]], align 8
-; CHECK-NEXT: [[BL:%.*]] = load double, double* [[ARG1]], align 8
-; CHECK-NEXT: [[CL:%.*]] = load double, double* [[ARG2]], align 8
+; CHECK-NEXT: store double 2.000000e+00, ptr [[ARG0]], align 4
+; CHECK-NEXT: store double 3.000000e+00, ptr [[ARG1]], align 4
+; CHECK-NEXT: store double 4.000000e+00, ptr [[ARG2]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load double, ptr [[ARG0]], align 8
+; CHECK-NEXT: [[BL:%.*]] = load double, ptr [[ARG1]], align 8
+; CHECK-NEXT: [[CL:%.*]] = load double, ptr [[ARG2]], align 8
; CHECK-NEXT: [[TMP0:%.*]] = fadd double [[AL]], [[BL]]
; CHECK-NEXT: [[TMP1:%.*]] = fadd double [[AL]], [[CL]]
; CHECK-NEXT: [[TMP2:%.*]] = fadd double [[BL]], [[CL]]
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
%0 = add i32 %al, %bl
%1 = add i32 %al, %cl
%2 = add i32 %bl, %cl
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
%0 = add i32 %al, %bl
%1 = add i32 %al, %cl
%2 = add i32 %bl, %cl
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
%0 = add i32 %bl, %al
%1 = add i32 %cl, %al
%2 = add i32 %cl, %bl
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
%0 = add i32 %bl, %al
%1 = add i32 %cl, %al
%2 = add i32 %cl, %bl
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_2(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_2(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
%0 = sub i32 %al, %bl
%1 = sub i32 %al, %cl
%2 = sub i32 %bl, %cl
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_2(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_2(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
%0 = sub i32 %al, %bl
%1 = sub i32 %al, %cl
%2 = sub i32 %bl, %cl
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_1(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
%0 = sub i32 %bl, %al
%1 = sub i32 %cl, %al
%2 = sub i32 %cl, %bl
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_1(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
%0 = sub i32 %bl, %al
%1 = sub i32 %cl, %al
%2 = sub i32 %cl, %bl
ret void
}
-; CHECK: define internal void @outlined_ir_func_0(i32* [[ARG0:%.*]], i32* [[ARG1:%.*]], i32* [[ARG2:%.*]]) #0 {
+; CHECK: define internal void @outlined_ir_func_0(ptr [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]]) #0 {
; CHECK: entry_to_outline:
-; CHECK-NEXT: store i32 2, i32* [[ARG0]], align 4
-; CHECK-NEXT: store i32 3, i32* [[ARG1]], align 4
-; CHECK-NEXT: store i32 4, i32* [[ARG2]], align 4
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[ARG0]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[ARG1]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[ARG2]], align 4
+; CHECK-NEXT: store i32 2, ptr [[ARG0]], align 4
+; CHECK-NEXT: store i32 3, ptr [[ARG1]], align 4
+; CHECK-NEXT: store i32 4, ptr [[ARG2]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[ARG0]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[ARG1]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[ARG2]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[AL]], [[BL]]
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[AL]], [[CL]]
; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[BL]], [[CL]]
-; CHECK: define internal void @outlined_ir_func_1(i32* [[ARG0:%.*]], i32* [[ARG1:%.*]], i32* [[ARG2:%.*]]) #0 {
+; CHECK: define internal void @outlined_ir_func_1(ptr [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]]) #0 {
; CHECK: entry_to_outline:
-; CHECK-NEXT: store i32 2, i32* [[ARG0]], align 4
-; CHECK-NEXT: store i32 3, i32* [[ARG1]], align 4
-; CHECK-NEXT: store i32 4, i32* [[ARG2]], align 4
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[ARG0]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[ARG1]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[ARG2]], align 4
+; CHECK-NEXT: store i32 2, ptr [[ARG0]], align 4
+; CHECK-NEXT: store i32 3, ptr [[ARG1]], align 4
+; CHECK-NEXT: store i32 4, ptr [[ARG2]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[ARG0]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[ARG1]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[ARG2]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = sub i32 [[BL]], [[AL]]
; CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[CL]], [[AL]]
; CHECK-NEXT: [[TMP2:%.*]] = sub i32 [[CL]], [[BL]]
-; CHECK: define internal void @outlined_ir_func_2(i32* [[ARG0:%.*]], i32* [[ARG1:%.*]], i32* [[ARG2:%.*]]) #0 {
+; CHECK: define internal void @outlined_ir_func_2(ptr [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]]) #0 {
; CHECK: entry_to_outline:
-; CHECK-NEXT: store i32 2, i32* [[ARG0]], align 4
-; CHECK-NEXT: store i32 3, i32* [[ARG1]], align 4
-; CHECK-NEXT: store i32 4, i32* [[ARG2]], align 4
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[ARG0]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[ARG1]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[ARG2]], align 4
+; CHECK-NEXT: store i32 2, ptr [[ARG0]], align 4
+; CHECK-NEXT: store i32 3, ptr [[ARG1]], align 4
+; CHECK-NEXT: store i32 4, ptr [[ARG2]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[ARG0]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[ARG1]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[ARG2]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = sub i32 [[AL]], [[BL]]
; CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[AL]], [[CL]]
; CHECK-NEXT: [[TMP2:%.*]] = sub i32 [[BL]], [[CL]]
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_1(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_1(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; CHECK-NEXT: [[A:%.*]] = alloca float, align 4
; CHECK-NEXT: [[B:%.*]] = alloca float, align 4
; CHECK-NEXT: [[C:%.*]] = alloca float, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(float* [[A]], float* [[B]], float* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca float, align 4
%b = alloca float, align 4
%c = alloca float, align 4
- store float 2.0, float* %a, align 4
- store float 3.0, float* %b, align 4
- store float 4.0, float* %c, align 4
- %al = load float, float* %a
- %bl = load float, float* %b
- %cl = load float, float* %c
+ store float 2.0, ptr %a, align 4
+ store float 3.0, ptr %b, align 4
+ store float 4.0, ptr %c, align 4
+ %al = load float, ptr %a
+ %bl = load float, ptr %b
+ %cl = load float, ptr %c
%0 = fmul float %al, %bl
ret void
}
; CHECK-NEXT: [[A:%.*]] = alloca float, align 4
; CHECK-NEXT: [[B:%.*]] = alloca float, align 4
; CHECK-NEXT: [[C:%.*]] = alloca float, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(float* [[A]], float* [[B]], float* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca float, align 4
%b = alloca float, align 4
%c = alloca float, align 4
- store float 2.0, float* %a, align 4
- store float 3.0, float* %b, align 4
- store float 4.0, float* %c, align 4
- %al = load float, float* %a
- %bl = load float, float* %b
- %cl = load float, float* %c
+ store float 2.0, ptr %a, align 4
+ store float 3.0, ptr %b, align 4
+ store float 4.0, ptr %c, align 4
+ %al = load float, ptr %a
+ %bl = load float, ptr %b
+ %cl = load float, ptr %c
%0 = fmul float %al, %bl
ret void
}
attributes #0 = { "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "less-precise-fpmad"="true"
"unsafe-fp-math"="true" "no-infs-fp-math"="true"}
-; CHECK: define internal void @outlined_ir_func_0(float* [[ARG0:%.*]], float* [[ARG1:%.*]], float* [[ARG2:%.*]]) [[ATTR1:#[0-9]+]] {
+; CHECK: define internal void @outlined_ir_func_0(ptr [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]]) [[ATTR1:#[0-9]+]] {
; CHECK: entry_to_outline:
-; CHECK-NEXT: store float 2.000000e+00, float* [[ARG0]], align 4
-; CHECK-NEXT: store float 3.000000e+00, float* [[ARG1]], align 4
-; CHECK-NEXT: store float 4.000000e+00, float* [[ARG2]], align 4
-; CHECK-NEXT: [[AL:%.*]] = load float, float* [[ARG0]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load float, float* [[ARG1]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load float, float* [[ARG2]], align 4
+; CHECK-NEXT: store float 2.000000e+00, ptr [[ARG0]], align 4
+; CHECK-NEXT: store float 3.000000e+00, ptr [[ARG1]], align 4
+; CHECK-NEXT: store float 4.000000e+00, ptr [[ARG2]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load float, ptr [[ARG0]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load float, ptr [[ARG1]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load float, ptr [[ARG2]], align 4
-; CHECK: define internal void @outlined_ir_func_1(i32* [[ARG0:%.*]], i32* [[ARG1:%.*]], i32* [[ARG2:%.*]]) [[ATTR:#[0-9]+]] {
+; CHECK: define internal void @outlined_ir_func_1(ptr [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]]) [[ATTR:#[0-9]+]] {
; CHECK: entry_to_outline:
-; CHECK-NEXT: store i32 2, i32* [[ARG0]], align 4
-; CHECK-NEXT: store i32 3, i32* [[ARG1]], align 4
-; CHECK-NEXT: store i32 4, i32* [[ARG2]], align 4
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[ARG0]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[ARG1]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[ARG2]], align 4
+; CHECK-NEXT: store i32 2, ptr [[ARG0]], align 4
+; CHECK-NEXT: store i32 3, ptr [[ARG1]], align 4
+; CHECK-NEXT: store i32 4, ptr [[ARG2]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[ARG0]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[ARG1]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[ARG2]], align 4
; CHECK: attributes [[ATTR1]] = { minsize optsize "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "unsafe-fp-math"="false" }
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
attributes #0 = { "no-jump-tables"="true" "profile-sample-accurate"="true" "speculative_load_hardening" "noimplicitfloat"="true" "use-sample-profile"="true"}
-; CHECK: define internal void @outlined_ir_func_0(i32* [[ARG0:%.*]], i32* [[ARG1:%.*]], i32* [[ARG2:%.*]]) [[ATTR:#[0-9]+]] {
+; CHECK: define internal void @outlined_ir_func_0(ptr [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]]) [[ATTR:#[0-9]+]] {
; CHECK: entry_to_outline:
-; CHECK-NEXT: store i32 2, i32* [[ARG0]], align 4
-; CHECK-NEXT: store i32 3, i32* [[ARG1]], align 4
-; CHECK-NEXT: store i32 4, i32* [[ARG2]], align 4
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[ARG0]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[ARG1]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[ARG2]], align 4
+; CHECK-NEXT: store i32 2, ptr [[ARG0]], align 4
+; CHECK-NEXT: store i32 3, ptr [[ARG1]], align 4
+; CHECK-NEXT: store i32 4, ptr [[ARG2]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[ARG0]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[ARG1]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[ARG2]], align 4
; CHECK: attributes [[ATTR]] = { minsize optsize "no-jump-tables"="true" "noimplicitfloat"="true" "profile-sample-accurate"="true" "speculative_load_hardening" "use-sample-profile"="true" }
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[TMP0:%.*]], i32* [[A]], i32 [[TMP1:%.*]], i32* [[B]], i32 [[TMP2:%.*]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[TMP0:%.*]], ptr [[A]], i32 [[TMP1:%.*]], ptr [[B]], i32 [[TMP2:%.*]], ptr [[C]])
; CHECK-NEXT: ret void
; CHECK: next:
-; CHECK-NEXT: call void @outlined_ir_func_1(i32 2, i32* [[A]], i32 3, i32* [[B]], i32 4, i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_1(i32 2, ptr [[A]], i32 3, ptr [[B]], i32 4, ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 %0, i32* %a, align 4
- store i32 %1, i32* %b, align 4
- store i32 %2, i32* %c, align 4
+ store i32 %0, ptr %a, align 4
+ store i32 %1, ptr %b, align 4
+ store i32 %2, ptr %c, align 4
ret void
next:
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
ret void
}
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[B]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[C]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[B]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[C]], align 4
; CHECK-NEXT: call void @outlined_ir_func_0(i32 2, i32 [[AL]], i32 3, i32 [[BL]], i32 4, i32 [[CL]])
; CHECK-NEXT: ret void
; CHECK: next:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
%3 = add i32 2, %al
%4 = add i32 3, %bl
%5 = add i32 4, %cl
; CHECK-NEXT: add i32 [[ARG2]], [[ARG3]]
; CHECK-NEXT: add i32 [[ARG4]], [[ARG5]]
-; CHECK: define internal void @outlined_ir_func_1(i32 [[ARG0:%.*]], i32* [[ARG1:%.*]], i32 [[ARG2:%.*]], i32* [[ARG3:%.*]], i32 [[ARG4:%.*]], i32* [[ARG5:%.*]])
+; CHECK: define internal void @outlined_ir_func_1(i32 [[ARG0:%.*]], ptr [[ARG1:%.*]], i32 [[ARG2:%.*]], ptr [[ARG3:%.*]], i32 [[ARG4:%.*]], ptr [[ARG5:%.*]])
; CHECK: entry_to_outline:
-; CHECK-NEXT: store i32 [[ARG0]], i32* [[ARG1]]
-; CHECK-NEXT: store i32 [[ARG2]], i32* [[ARG3]]
-; CHECK-NEXT: store i32 [[ARG4]], i32* [[ARG5]]
+; CHECK-NEXT: store i32 [[ARG0]], ptr [[ARG1]]
+; CHECK-NEXT: store i32 [[ARG2]], ptr [[ARG3]]
+; CHECK-NEXT: store i32 [[ARG4]], ptr [[ARG5]]
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]])
; CHECK-NEXT: ret void
;
; NOCOST-LABEL: @function1(
; NOCOST-NEXT: entry:
; NOCOST-NEXT: [[A:%.*]] = alloca i32, align 4
; NOCOST-NEXT: [[B:%.*]] = alloca i32, align 4
-; NOCOST-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]])
+; NOCOST-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]])
; NOCOST-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
- %0 = load i32, i32* %a, align 4
- %1 = load i32, i32* %b, align 4
+ %0 = load i32, ptr %a, align 4
+ %1 = load i32, ptr %b, align 4
%add = add i32 %0, %1
%mul = mul i32 %0, %1
%sub = sub i32 %0, %1
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]])
; CHECK-NEXT: ret void
;
; NOCOST-LABEL: @function2(
; NOCOST-NEXT: entry:
; NOCOST-NEXT: [[A:%.*]] = alloca i32, align 4
; NOCOST-NEXT: [[B:%.*]] = alloca i32, align 4
-; NOCOST-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]])
+; NOCOST-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]])
; NOCOST-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
- %0 = load i32, i32* %a, align 4
- %1 = load i32, i32* %b, align 4
+ %0 = load i32, ptr %a, align 4
+ %1 = load i32, ptr %b, align 4
%add = add i32 %0, %1
%mul = mul i32 %0, %1
%sub = sub i32 %0, %1
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 2, i32* [[A]], align 4
-; CHECK-NEXT: store i32 3, i32* [[B]], align 4
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4
+; CHECK-NEXT: store i32 2, ptr [[A]], align 4
+; CHECK-NEXT: store i32 3, ptr [[B]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP0]], [[TMP1]]
-; CHECK-NEXT: store i32 [[ADD]], i32* [[OUTPUT]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[OUTPUT]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[OUTPUT]], align 4
+; CHECK-NEXT: store i32 [[ADD]], ptr [[OUTPUT]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[OUTPUT]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[OUTPUT]], align 4
; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[TMP2]], [[ADD]]
-; CHECK-NEXT: store i32 [[MUL]], i32* [[RESULT]], align 4
+; CHECK-NEXT: store i32 [[MUL]], ptr [[RESULT]], align 4
; CHECK-NEXT: ret void
;
; NOCOST-LABEL: @function3(
; NOCOST-NEXT: [[B:%.*]] = alloca i32, align 4
; NOCOST-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; NOCOST-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; NOCOST-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[ADD_LOC]] to i8*
-; NOCOST-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; NOCOST-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[DOTLOC]] to i8*
-; NOCOST-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; NOCOST-NEXT: call void @outlined_ir_func_1(i32* [[A]], i32* [[B]], i32* [[OUTPUT]], i32* [[ADD_LOC]], i32* [[DOTLOC]])
-; NOCOST-NEXT: [[ADD_RELOAD:%.*]] = load i32, i32* [[ADD_LOC]], align 4
-; NOCOST-NEXT: [[DOTRELOAD:%.*]] = load i32, i32* [[DOTLOC]], align 4
-; NOCOST-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; NOCOST-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
-; NOCOST-NEXT: [[TMP0:%.*]] = load i32, i32* [[OUTPUT]], align 4
-; NOCOST-NEXT: call void @outlined_ir_func_2(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], i32* [[RESULT]])
+; NOCOST-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; NOCOST-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
+; NOCOST-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
+; NOCOST-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
+; NOCOST-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; NOCOST-NEXT: [[TMP0:%.*]] = load i32, ptr [[OUTPUT]], align 4
+; NOCOST-NEXT: call void @outlined_ir_func_2(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; NOCOST-NEXT: ret void
;
entry:
%b = alloca i32, align 4
%output = alloca i32, align 4
%result = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- %0 = load i32, i32* %a, align 4
- %1 = load i32, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ %0 = load i32, ptr %a, align 4
+ %1 = load i32, ptr %b, align 4
%add = add i32 %0, %1
- store i32 %add, i32* %output, align 4
- %2 = load i32, i32* %output, align 4
- %3 = load i32, i32* %output, align 4
+ store i32 %add, ptr %output, align 4
+ %2 = load i32, ptr %output, align 4
+ %3 = load i32, ptr %output, align 4
%mul = mul i32 %2, %add
- store i32 %mul, i32* %result, align 4
+ store i32 %mul, ptr %result, align 4
ret void
}
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 2, i32* [[A]], align 4
-; CHECK-NEXT: store i32 3, i32* [[B]], align 4
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4
+; CHECK-NEXT: store i32 2, ptr [[A]], align 4
+; CHECK-NEXT: store i32 3, ptr [[B]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP0]], [[TMP1]]
-; CHECK-NEXT: store i32 [[ADD]], i32* [[OUTPUT]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[OUTPUT]], align 4
+; CHECK-NEXT: store i32 [[ADD]], ptr [[OUTPUT]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[OUTPUT]], align 4
; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[TMP2]], [[ADD]]
-; CHECK-NEXT: store i32 [[MUL]], i32* [[RESULT]], align 4
+; CHECK-NEXT: store i32 [[MUL]], ptr [[RESULT]], align 4
; CHECK-NEXT: ret void
;
; NOCOST-LABEL: @function4(
; NOCOST-NEXT: [[B:%.*]] = alloca i32, align 4
; NOCOST-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; NOCOST-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; NOCOST-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[ADD_LOC]] to i8*
-; NOCOST-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; NOCOST-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[DOTLOC]] to i8*
-; NOCOST-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; NOCOST-NEXT: call void @outlined_ir_func_1(i32* [[A]], i32* [[B]], i32* [[OUTPUT]], i32* [[ADD_LOC]], i32* [[DOTLOC]])
-; NOCOST-NEXT: [[ADD_RELOAD:%.*]] = load i32, i32* [[ADD_LOC]], align 4
-; NOCOST-NEXT: [[DOTRELOAD:%.*]] = load i32, i32* [[DOTLOC]], align 4
-; NOCOST-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; NOCOST-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
-; NOCOST-NEXT: call void @outlined_ir_func_2(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], i32* [[RESULT]])
+; NOCOST-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; NOCOST-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
+; NOCOST-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
+; NOCOST-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
+; NOCOST-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; NOCOST-NEXT: call void @outlined_ir_func_2(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; NOCOST-NEXT: ret void
;
entry:
%b = alloca i32, align 4
%output = alloca i32, align 4
%result = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- %0 = load i32, i32* %a, align 4
- %1 = load i32, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ %0 = load i32, ptr %a, align 4
+ %1 = load i32, ptr %b, align 4
%add = add i32 %0, %1
- store i32 %add, i32* %output, align 4
- %2 = load i32, i32* %output, align 4
+ store i32 %add, ptr %output, align 4
+ %2 = load i32, ptr %output, align 4
%mul = mul i32 %2, %add
- store i32 %mul, i32* %result, align 4
+ store i32 %mul, ptr %result, align 4
ret void
}
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
call void @llvm.dbg.value(metadata i64 0, metadata !14, metadata !DIExpression()), !dbg !14
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
-; CHECK: define internal void @outlined_ir_func_0(i32* [[ARG0:%.*]], i32* [[ARG1:%.*]], i32* [[ARG2:%.*]]) #1 {
+; CHECK: define internal void @outlined_ir_func_0(ptr [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]]) #1 {
; CHECK: entry_to_outline:
-; CHECK-NEXT: store i32 2, i32* [[ARG0]], align 4
-; CHECK-NEXT: store i32 3, i32* [[ARG1]], align 4
-; CHECK-NEXT: store i32 4, i32* [[ARG2]], align 4
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[ARG0]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[ARG1]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[ARG2]], align 4
+; CHECK-NEXT: store i32 2, ptr [[ARG0]], align 4
+; CHECK-NEXT: store i32 3, ptr [[ARG1]], align 4
+; CHECK-NEXT: store i32 4, ptr [[ARG2]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[ARG0]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[ARG1]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[ARG2]], align 4
!0 = !DIFile(filename: "foo.c", directory: "/tmp")
!1 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32 3, i32* [[A]], i32 4, i32* [[B]], i32 5, i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(i32 3, ptr [[A]], i32 4, ptr [[B]], i32 5, ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 3, i32* %a, align 4
- store i32 4, i32* %b, align 4
- store i32 5, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 3, ptr %a, align 4
+ store i32 4, ptr %b, align 4
+ store i32 5, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32 2, i32* [[A]], i32 3, i32* [[B]], i32 4, i32* [[C]])
+; CHECK-NEXT: call void @outlined_ir_func_0(i32 2, ptr [[A]], i32 3, ptr [[B]], i32 4, ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
-; CHECK: define internal void @outlined_ir_func_0(i32 [[ARG0:%.*]], i32* [[ARG1:%.*]], i32 [[ARG2:%.*]], i32* [[ARG3:%.*]], i32 [[ARG4:%.*]], i32* [[ARG5:%.*]]) #0 {
+; CHECK: define internal void @outlined_ir_func_0(i32 [[ARG0:%.*]], ptr [[ARG1:%.*]], i32 [[ARG2:%.*]], ptr [[ARG3:%.*]], i32 [[ARG4:%.*]], ptr [[ARG5:%.*]]) #0 {
; CHECK: entry_to_outline:
-; CHECK-NEXT: store i32 [[ARG0]], i32* [[ARG1]], align 4
-; CHECK-NEXT: store i32 [[ARG2]], i32* [[ARG3]], align 4
-; CHECK-NEXT: store i32 [[ARG4]], i32* [[ARG5]], align 4
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[ARG1]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[ARG3]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[ARG5]], align 4
+; CHECK-NEXT: store i32 [[ARG0]], ptr [[ARG1]], align 4
+; CHECK-NEXT: store i32 [[ARG2]], ptr [[ARG3]], align 4
+; CHECK-NEXT: store i32 [[ARG4]], ptr [[ARG5]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[ARG1]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[ARG3]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[ARG5]], align 4
define void @outline_globals1() {
; CHECK-LABEL: @outline_globals1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* @global1, i32* @global2)
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr @global1, ptr @global2)
; CHECK-NEXT: ret void
;
entry:
- %0 = load i32, i32* @global1
- %1 = load i32, i32* @global2
+ %0 = load i32, ptr @global1
+ %1 = load i32, ptr @global2
%2 = add i32 %0, %1
ret void
}
define void @outline_globals2() {
; CHECK-LABEL: @outline_globals2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* @global3, i32* @global4)
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr @global3, ptr @global4)
; CHECK-NEXT: ret void
;
entry:
- %0 = load i32, i32* @global3
- %1 = load i32, i32* @global4
+ %0 = load i32, ptr @global3
+ %1 = load i32, ptr @global4
%2 = add i32 %0, %1
ret void
}
-; CHECK: define internal void @outlined_ir_func_0(i32* [[ARG0:%.*]], i32* [[ARG1:%.*]])
+; CHECK: define internal void @outlined_ir_func_0(ptr [[ARG0:%.*]], ptr [[ARG1:%.*]])
; CHECK: entry_to_outline:
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARG0]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARG1]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARG0]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARG1]]
; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP0]], [[TMP1]]
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[ADD_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[DOTLOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[OUTPUT]], i32* [[ADD_LOC]], i32* [[DOTLOC]], i32 0)
-; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, i32* [[ADD_LOC]], align 4
-; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, i32* [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[OUTPUT]], align 4
-; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], i32* [[RESULT]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]], i32 0)
+; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
+; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[OUTPUT]], align 4
+; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
;
entry:
%b = alloca i32, align 4
%output = alloca i32, align 4
%result = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- %0 = load i32, i32* %a, align 4
- %1 = load i32, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ %0 = load i32, ptr %a, align 4
+ %1 = load i32, ptr %b, align 4
%add = add i32 %0, %1
%sub = sub i32 %0, %1
- store i32 %add, i32* %output, align 4
- %2 = load i32, i32* %output, align 4
- %3 = load i32, i32* %output, align 4
+ store i32 %add, ptr %output, align 4
+ %2 = load i32, ptr %output, align 4
+ %3 = load i32, ptr %output, align 4
%mul = mul i32 %2, %add
- store i32 %mul, i32* %result, align 4
+ store i32 %mul, ptr %result, align 4
ret void
}
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[SUB_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[DOTLOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[OUTPUT]], i32* [[SUB_LOC]], i32* [[DOTLOC]], i32 1)
-; CHECK-NEXT: [[SUB_RELOAD:%.*]] = load i32, i32* [[SUB_LOC]], align 4
-; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, i32* [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[SUB_RELOAD]], i32* [[RESULT]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[SUB_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[SUB_LOC]], ptr [[DOTLOC]], i32 1)
+; CHECK-NEXT: [[SUB_RELOAD:%.*]] = load i32, ptr [[SUB_LOC]], align 4
+; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[SUB_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[SUB_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
;
entry:
%b = alloca i32, align 4
%output = alloca i32, align 4
%result = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- %0 = load i32, i32* %a, align 4
- %1 = load i32, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ %0 = load i32, ptr %a, align 4
+ %1 = load i32, ptr %b, align 4
%add = add i32 %0, %1
%sub = sub i32 %0, %1
- store i32 %add, i32* %output, align 4
- %2 = load i32, i32* %output, align 4
+ store i32 %add, ptr %output, align 4
+ %2 = load i32, ptr %output, align 4
%mul = mul i32 %2, %sub
- store i32 %mul, i32* %result, align 4
+ store i32 %mul, ptr %result, align 4
ret void
}
-; CHECK: define internal void @outlined_ir_func_0(i32* [[ARG0:%.*]], i32* [[ARG1:%.*]], i32* [[ARG2:%.*]], i32* [[ARG3:%.*]], i32* [[ARG4:%.*]], i32 [[ARG5:%.*]]) #1 {
+; CHECK: define internal void @outlined_ir_func_0(ptr [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]], ptr [[ARG3:%.*]], ptr [[ARG4:%.*]], i32 [[ARG5:%.*]]) #1 {
; CHECK: entry_to_outline:
-; CHECK-NEXT: store i32 2, i32* [[ARG0]], align 4
-; CHECK-NEXT: store i32 3, i32* [[ARG1]], align 4
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARG0]], align 4
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARG1]], align 4
+; CHECK-NEXT: store i32 2, ptr [[ARG0]], align 4
+; CHECK-NEXT: store i32 3, ptr [[ARG1]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARG0]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARG1]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP0]], [[TMP1]]
; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[TMP0]], [[TMP1]]
-; CHECK-NEXT: store i32 [[ADD]], i32* [[ARG2]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARG2]], align 4
+; CHECK-NEXT: store i32 [[ADD]], ptr [[ARG2]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARG2]], align 4
; CHECK: _after_outline.exitStub:
; CHECK-NEXT: switch i32 [[ARG5]], label [[BLOCK:%.*]] [
; CHECK-NEXT: i32 1, label %[[BLOCK_1:.*]]
; CHECK: [[BLOCK_0]]:
-; CHECK-NEXT: store i32 [[ADD]], i32* [[ARG3]], align 4
-; CHECK-NEXT: store i32 [[TMP2]], i32* [[ARG4]], align 4
+; CHECK-NEXT: store i32 [[ADD]], ptr [[ARG3]], align 4
+; CHECK-NEXT: store i32 [[TMP2]], ptr [[ARG4]], align 4
; CHECK: [[BLOCK_1]]:
-; CHECK-NEXT: store i32 [[SUB]], i32* [[ARG3]], align 4
-; CHECK-NEXT: store i32 [[TMP2]], i32* [[ARG4]], align 4
+; CHECK-NEXT: store i32 [[SUB]], ptr [[ARG3]], align 4
+; CHECK-NEXT: store i32 [[TMP2]], ptr [[ARG4]], align 4
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 2, i32* [[A]], align 4
-; CHECK-NEXT: store i32 3, i32* [[B]], align 4
-; CHECK-NEXT: store i32 4, i32* [[C]], align 4
-; CHECK-NEXT: call void @[[FUNCTION_0:.*]](i32* [[A]], i32* [[C]], i32* [[B]])
+; CHECK-NEXT: store i32 2, ptr [[A]], align 4
+; CHECK-NEXT: store i32 3, ptr [[B]], align 4
+; CHECK-NEXT: store i32 4, ptr [[C]], align 4
+; CHECK-NEXT: call void @[[FUNCTION_0:.*]](ptr [[A]], ptr [[C]], ptr [[B]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %cl = load i32, i32* %c
- %bl = load i32, i32* %b
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %cl = load i32, ptr %c
+ %bl = load i32, ptr %b
ret void
}
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 2, i32* [[A]], align 4
-; CHECK-NEXT: store i32 3, i32* [[B]], align 4
-; CHECK-NEXT: store i32 4, i32* [[C]], align 4
-; CHECK-NEXT: call void @[[FUNCTION_0]](i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: store i32 2, ptr [[A]], align 4
+; CHECK-NEXT: store i32 3, ptr [[B]], align 4
+; CHECK-NEXT: store i32 4, ptr [[C]], align 4
+; CHECK-NEXT: call void @[[FUNCTION_0]](ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
-; CHECK: define internal void @[[FUNCTION_0]](i32* [[ARG0:%.*]], i32* [[ARG1:%.*]], i32* [[ARG2:%.*]])
+; CHECK: define internal void @[[FUNCTION_0]](ptr [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]])
; CHECK: entry_to_outline:
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[ARG0]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[ARG1]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[ARG2]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[ARG0]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[ARG1]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[ARG2]], align 4
; Show that we do not extract similar regions that would involve the splitting
; of phi nodes on exit.
-define void @function1(i32* %a, i32* %b) {
+define void @function1(ptr %a, ptr %b) {
entry:
%0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
br label %test1
test1:
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
br label %first
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
br label %first
dummy:
ret void
ret void
}
-define void @function2(i32* %a, i32* %b) {
+define void @function2(ptr %a, ptr %b) {
entry:
%0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
br label %test1
test1:
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
br label %first
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
br label %first
dummy:
ret void
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[DOTCE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[TMP0]], i32* [[DOTCE_LOC]])
-; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, i32* [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]])
+; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[DOTCE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[TMP0]], i32* [[DOTCE_LOC]])
-; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, i32* [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]])
+; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
-; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[TMP0:%.*]], align 4
+; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0:%.*]], align 4
; CHECK-NEXT: br label [[TEST1:%.*]]
; CHECK: test1:
-; CHECK-NEXT: [[E:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[E:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: br label [[FIRST_SPLIT:%.*]]
; CHECK: test:
-; CHECK-NEXT: [[D:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[D:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: br label [[FIRST_SPLIT]]
; CHECK: first.split:
; CHECK-NEXT: [[DOTCE:%.*]] = phi i32 [ [[C]], [[TEST:%.*]] ], [ [[E]], [[TEST1]] ]
; CHECK-NEXT: br label [[FIRST_EXITSTUB:%.*]]
; CHECK: first.exitStub:
-; CHECK-NEXT: store i32 [[DOTCE]], i32* [[TMP1:%.*]], align 4
+; CHECK-NEXT: store i32 [[DOTCE]], ptr [[TMP1:%.*]], align 4
; CHECK-NEXT: ret void
;
%struct.RT = type { i8, [10 x [20 x i32]], i8 }
%struct.ST = type { i32, double, %struct.RT }
-define void @function1(%struct.ST* %s, i64 %t) {
+define void @function1(ptr %s, i64 %t) {
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], %struct.ST* [[S:%.*]], i64 [[T:%.*]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[S:%.*]], i64 [[T:%.*]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- %0 = getelementptr inbounds %struct.ST, %struct.ST* %s, i64 %t, i32 1
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ %0 = getelementptr inbounds %struct.ST, ptr %s, i64 %t, i32 1
ret void
}
-define void @function2(%struct.ST* %s, i64 %t) {
+define void @function2(ptr %s, i64 %t) {
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], %struct.ST* [[S:%.*]], i64 [[T:%.*]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[S:%.*]], i64 [[T:%.*]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- %0 = getelementptr inbounds %struct.ST, %struct.ST* %s, i64 %t, i32 1
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ %0 = getelementptr inbounds %struct.ST, ptr %s, i64 %t, i32 1
ret void
}
-define void @function3(%struct.ST* %s, i64 %t) {
+define void @function3(ptr %s, i64 %t) {
; CHECK-LABEL: @function3(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 2, i32* [[A]], align 4
-; CHECK-NEXT: store i32 3, i32* [[B]], align 4
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.ST* [[S:%.*]], i64 [[T:%.*]], i32 0
+; CHECK-NEXT: store i32 2, ptr [[A]], align 4
+; CHECK-NEXT: store i32 3, ptr [[B]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], ptr [[S:%.*]], i64 [[T:%.*]], i32 0
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- %0 = getelementptr inbounds %struct.ST, %struct.ST* %s, i64 %t, i32 0
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ %0 = getelementptr inbounds %struct.ST, ptr %s, i64 %t, i32 0
ret void
}
-; CHECK: define internal void @outlined_ir_func_0(i32* [[ARG0:%.*]], i32* [[ARG1:%.*]], %struct.ST* [[ARG2:%.*]], i64 [[ARG3:%.*]])
+; CHECK: define internal void @outlined_ir_func_0(ptr [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]], i64 [[ARG3:%.*]])
; CHECK: entry_to_outline:
-; CHECK-NEXT: store i32 2, i32* [[ARG0]], align 4
-; CHECK-NEXT: store i32 3, i32* [[ARG1]], align 4
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds %struct.ST, %struct.ST* [[ARG2]], i64 [[ARG3]], i32 1
+; CHECK-NEXT: store i32 2, ptr [[ARG0]], align 4
+; CHECK-NEXT: store i32 3, ptr [[ARG1]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds %struct.ST, ptr [[ARG2]], i64 [[ARG3]], i32 1
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_1(i32* [[A]], i32* [[B]])
+; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
%0 = icmp slt i32 %al, %bl
ret void
}
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_1(i32* [[A]], i32* [[B]])
+; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
%0 = icmp slt i32 %al, %bl
ret void
}
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_1(i32* [[A]], i32* [[B]])
+; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
%0 = icmp sgt i32 %bl, %al
ret void
}
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 2, i32* [[A]], align 4
-; CHECK-NEXT: store i32 3, i32* [[B]], align 4
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[B]], align 4
+; CHECK-NEXT: store i32 2, ptr [[A]], align 4
+; CHECK-NEXT: store i32 3, ptr [[B]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[B]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = icmp sgt i32 [[AL]], [[BL]]
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
%0 = icmp sgt i32 %al, %bl
ret void
}
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
%0 = icmp ugt i32 %al, %bl
ret void
}
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
%0 = icmp ugt i32 %al, %bl
ret void
}
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
%0 = icmp ult i32 %bl, %al
ret void
}
-; CHECK: define internal void @outlined_ir_func_0(i32* [[ARG0:%.*]], i32* [[ARG1:%.*]]) #0 {
+; CHECK: define internal void @outlined_ir_func_0(ptr [[ARG0:%.*]], ptr [[ARG1:%.*]]) #0 {
; CHECK: entry_to_outline:
-; CHECK-NEXT: store i32 2, i32* [[ARG0]], align 4
-; CHECK-NEXT: store i32 3, i32* [[ARG1]], align 4
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[ARG0]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[ARG1]], align 4
+; CHECK-NEXT: store i32 2, ptr [[ARG0]], align 4
+; CHECK-NEXT: store i32 3, ptr [[ARG1]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[ARG0]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[ARG1]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = icmp ugt i32 [[AL]], [[BL]]
-; CHECK: define internal void @outlined_ir_func_1(i32* [[ARG0:%.*]], i32* [[ARG1:%.*]]) #0 {
+; CHECK: define internal void @outlined_ir_func_1(ptr [[ARG0:%.*]], ptr [[ARG1:%.*]]) #0 {
; CHECK: entry_to_outline:
-; CHECK-NEXT: store i32 2, i32* [[ARG0]], align 4
-; CHECK-NEXT: store i32 3, i32* [[ARG1]], align 4
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[ARG0]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[ARG1]], align 4
+; CHECK-NEXT: store i32 2, ptr [[ARG0]], align 4
+; CHECK-NEXT: store i32 3, ptr [[ARG1]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[ARG0]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[ARG1]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = icmp slt i32 [[AL]], [[BL]]
%b2 = alloca i32, align 4
br label %block_2
block_2:
- %a2val = load i32, i32* %a
- %b2val = load i32, i32* %b
+ %a2val = load i32, ptr %a
+ %b2val = load i32, ptr %b
%add2 = add i32 2, %a2val
%mul2 = mul i32 2, %b2val
br label %block_5
block_3:
- %aval = load i32, i32* %a
- %bval = load i32, i32* %b
+ %aval = load i32, ptr %a
+ %bval = load i32, ptr %b
%add = add i32 2, %aval
%mul = mul i32 2, %bval
br label %block_4
block_4:
- store i32 %add, i32* %output, align 4
- store i32 %mul, i32* %result, align 4
+ store i32 %add, ptr %output, align 4
+ store i32 %mul, ptr %result, align 4
br label %block_6
block_5:
- store i32 %add2, i32* %output, align 4
- store i32 %mul2, i32* %result, align 4
+ store i32 %add2, ptr %output, align 4
+ store i32 %mul2, ptr %result, align 4
br label %block_7
block_6:
%div = udiv i32 %aval, %bval
%b2 = alloca i32, align 4
br label %block_2
block_2:
- %a2val = load i32, i32* %a
- %b2val = load i32, i32* %b
+ %a2val = load i32, ptr %a
+ %b2val = load i32, ptr %b
%add2 = add i32 2, %a2val
%mul2 = mul i32 2, %b2val
br label %block_5
block_3:
- %aval = load i32, i32* %a
- %bval = load i32, i32* %b
+ %aval = load i32, ptr %a
+ %bval = load i32, ptr %b
%add = add i32 2, %aval
%mul = mul i32 2, %bval
br label %block_4
block_4:
- store i32 %add, i32* %output, align 4
- store i32 %mul, i32* %result, align 4
+ store i32 %add, ptr %output, align 4
+ store i32 %mul, ptr %result, align 4
br label %block_7
block_5:
- store i32 %add2, i32* %output, align 4
- store i32 %mul2, i32* %result, align 4
+ store i32 %add2, ptr %output, align 4
+ store i32 %mul2, ptr %result, align 4
br label %block_6
block_6:
%diff = sub i32 %a2val, %b2val
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[A2VAL_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[B2VAL_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: [[LT_CAST2:%.*]] = bitcast i32* [[AVAL_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST2]])
-; CHECK-NEXT: [[LT_CAST3:%.*]] = bitcast i32* [[BVAL_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST3]])
-; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[OUTPUT]], i32* [[RESULT]], i32* [[A2VAL_LOC]], i32* [[B2VAL_LOC]], i32* [[AVAL_LOC]], i32* [[BVAL_LOC]], i32 0)
-; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, i32* [[A2VAL_LOC]], align 4
-; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, i32* [[B2VAL_LOC]], align 4
-; CHECK-NEXT: [[AVAL_RELOAD:%.*]] = load i32, i32* [[AVAL_LOC]], align 4
-; CHECK-NEXT: [[BVAL_RELOAD:%.*]] = load i32, i32* [[BVAL_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST2]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST3]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[BVAL_LOC]])
+; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[A2VAL_LOC]], ptr [[B2VAL_LOC]], ptr [[AVAL_LOC]], ptr [[BVAL_LOC]], i32 0)
+; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, ptr [[A2VAL_LOC]], align 4
+; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, ptr [[B2VAL_LOC]], align 4
+; CHECK-NEXT: [[AVAL_RELOAD:%.*]] = load i32, ptr [[AVAL_LOC]], align 4
+; CHECK-NEXT: [[BVAL_RELOAD:%.*]] = load i32, ptr [[BVAL_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[BVAL_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[BLOCK_6:%.*]], label [[BLOCK_7:%.*]]
; CHECK: block_6:
; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[AVAL_RELOAD]], [[BVAL_RELOAD]]
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[A2VAL_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[B2VAL_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: [[LT_CAST2:%.*]] = bitcast i32* [[ADD_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST2]])
-; CHECK-NEXT: [[LT_CAST3:%.*]] = bitcast i32* [[MUL_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST3]])
-; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[OUTPUT]], i32* [[RESULT]], i32* [[A2VAL_LOC]], i32* [[B2VAL_LOC]], i32* [[ADD_LOC]], i32* [[MUL_LOC]], i32 1)
-; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, i32* [[A2VAL_LOC]], align 4
-; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, i32* [[B2VAL_LOC]], align 4
-; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, i32* [[ADD_LOC]], align 4
-; CHECK-NEXT: [[MUL_RELOAD:%.*]] = load i32, i32* [[MUL_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST2]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST3]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[MUL_LOC]])
+; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[A2VAL_LOC]], ptr [[B2VAL_LOC]], ptr [[ADD_LOC]], ptr [[MUL_LOC]], i32 1)
+; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, ptr [[A2VAL_LOC]], align 4
+; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, ptr [[B2VAL_LOC]], align 4
+; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
+; CHECK-NEXT: [[MUL_RELOAD:%.*]] = load i32, ptr [[MUL_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[MUL_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[BLOCK_7:%.*]], label [[BLOCK_6:%.*]]
; CHECK: block_6:
; CHECK-NEXT: [[DIFF:%.*]] = sub i32 [[A2VAL_RELOAD]], [[B2VAL_RELOAD]]
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[BLOCK_2_TO_OUTLINE:%.*]]
; CHECK: block_2_to_outline:
-; CHECK-NEXT: [[A2VAL:%.*]] = load i32, i32* [[TMP0:%.*]], align 4
-; CHECK-NEXT: [[B2VAL:%.*]] = load i32, i32* [[TMP1:%.*]], align 4
+; CHECK-NEXT: [[A2VAL:%.*]] = load i32, ptr [[TMP0:%.*]], align 4
+; CHECK-NEXT: [[B2VAL:%.*]] = load i32, ptr [[TMP1:%.*]], align 4
; CHECK-NEXT: [[ADD2:%.*]] = add i32 2, [[A2VAL]]
; CHECK-NEXT: [[MUL2:%.*]] = mul i32 2, [[B2VAL]]
; CHECK-NEXT: br label [[BLOCK_5:%.*]]
; CHECK: block_3:
-; CHECK-NEXT: [[AVAL:%.*]] = load i32, i32* [[TMP0]], align 4
-; CHECK-NEXT: [[BVAL:%.*]] = load i32, i32* [[TMP1]], align 4
+; CHECK-NEXT: [[AVAL:%.*]] = load i32, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[BVAL:%.*]] = load i32, ptr [[TMP1]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 2, [[AVAL]]
; CHECK-NEXT: [[MUL:%.*]] = mul i32 2, [[BVAL]]
; CHECK-NEXT: br label [[BLOCK_4:%.*]]
; CHECK: block_4:
-; CHECK-NEXT: store i32 [[ADD]], i32* [[TMP2:%.*]], align 4
-; CHECK-NEXT: store i32 [[MUL]], i32* [[TMP3:%.*]], align 4
+; CHECK-NEXT: store i32 [[ADD]], ptr [[TMP2:%.*]], align 4
+; CHECK-NEXT: store i32 [[MUL]], ptr [[TMP3:%.*]], align 4
; CHECK-NEXT: br label [[BLOCK_6_EXITSTUB:%.*]]
; CHECK: block_5:
-; CHECK-NEXT: store i32 [[ADD2]], i32* [[TMP2]], align 4
-; CHECK-NEXT: store i32 [[MUL2]], i32* [[TMP3]], align 4
+; CHECK-NEXT: store i32 [[ADD2]], ptr [[TMP2]], align 4
+; CHECK-NEXT: store i32 [[MUL2]], ptr [[TMP3]], align 4
; CHECK-NEXT: br label [[BLOCK_7_EXITSTUB:%.*]]
; CHECK: block_6.exitStub:
; CHECK-NEXT: switch i32 [[TMP8:%.*]], label [[FINAL_BLOCK_1:%.*]] [
; CHECK-NEXT: i32 1, label [[OUTPUT_BLOCK_1_0:%.*]]
; CHECK-NEXT: ]
; CHECK: output_block_0_0:
-; CHECK-NEXT: store i32 [[A2VAL]], i32* [[TMP4:%.*]], align 4
-; CHECK-NEXT: store i32 [[B2VAL]], i32* [[TMP5:%.*]], align 4
+; CHECK-NEXT: store i32 [[A2VAL]], ptr [[TMP4:%.*]], align 4
+; CHECK-NEXT: store i32 [[B2VAL]], ptr [[TMP5:%.*]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_0]]
; CHECK: output_block_0_1:
-; CHECK-NEXT: store i32 [[AVAL]], i32* [[TMP6:%.*]], align 4
-; CHECK-NEXT: store i32 [[BVAL]], i32* [[TMP7:%.*]], align 4
+; CHECK-NEXT: store i32 [[AVAL]], ptr [[TMP6:%.*]], align 4
+; CHECK-NEXT: store i32 [[BVAL]], ptr [[TMP7:%.*]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_1]]
; CHECK: output_block_1_0:
-; CHECK-NEXT: store i32 [[A2VAL]], i32* [[TMP4]], align 4
-; CHECK-NEXT: store i32 [[B2VAL]], i32* [[TMP5]], align 4
+; CHECK-NEXT: store i32 [[A2VAL]], ptr [[TMP4]], align 4
+; CHECK-NEXT: store i32 [[B2VAL]], ptr [[TMP5]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_0]]
; CHECK: output_block_1_1:
-; CHECK-NEXT: store i32 [[ADD]], i32* [[TMP6]], align 4
-; CHECK-NEXT: store i32 [[MUL]], i32* [[TMP7]], align 4
+; CHECK-NEXT: store i32 [[ADD]], ptr [[TMP6]], align 4
+; CHECK-NEXT: store i32 [[MUL]], ptr [[TMP7]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_1]]
; CHECK: final_block_0:
; CHECK-NEXT: ret i1 false
%b2 = alloca i32, align 4
br label %block_2
block_2:
- %a2val = load i32, i32* %a
- %b2val = load i32, i32* %b
+ %a2val = load i32, ptr %a
+ %b2val = load i32, ptr %b
%add2 = add i32 2, %a2val
%mul2 = mul i32 2, %b2val
br label %block_5
block_3:
- %aval = load i32, i32* %a
- %bval = load i32, i32* %b
+ %aval = load i32, ptr %a
+ %bval = load i32, ptr %b
%add = add i32 2, %aval
%mul = mul i32 2, %bval
br label %block_4
block_4:
- store i32 %add, i32* %output, align 4
- store i32 %mul, i32* %result, align 4
+ store i32 %add, ptr %output, align 4
+ store i32 %mul, ptr %result, align 4
br label %block_6
block_5:
- store i32 %add2, i32* %output, align 4
- store i32 %mul2, i32* %result, align 4
+ store i32 %add2, ptr %output, align 4
+ store i32 %mul2, ptr %result, align 4
br label %block_7
block_6:
ret void
%b2 = alloca i32, align 4
br label %block_2
block_2:
- %a2val = load i32, i32* %a
- %b2val = load i32, i32* %b
+ %a2val = load i32, ptr %a
+ %b2val = load i32, ptr %b
%add2 = add i32 2, %a2val
%mul2 = mul i32 2, %b2val
br label %block_5
block_3:
- %aval = load i32, i32* %a
- %bval = load i32, i32* %b
+ %aval = load i32, ptr %a
+ %bval = load i32, ptr %b
%add = add i32 2, %aval
%mul = mul i32 2, %bval
br label %block_4
block_4:
- store i32 %add, i32* %output, align 4
- store i32 %mul, i32* %result, align 4
+ store i32 %add, ptr %output, align 4
+ store i32 %mul, ptr %result, align 4
br label %block_7
block_5:
- store i32 %add2, i32* %output, align 4
- store i32 %mul2, i32* %result, align 4
+ store i32 %add2, ptr %output, align 4
+ store i32 %mul2, ptr %result, align 4
br label %block_6
block_6:
%diff = sub i32 %a2val, %b2val
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[OUTPUT]], i32* [[RESULT]], i32* null, i32* null, i32* null, i32* null, i32 -1)
+; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr null, ptr null, ptr null, ptr null, i32 -1)
; CHECK-NEXT: br i1 [[TMP0]], label [[BLOCK_6:%.*]], label [[BLOCK_7:%.*]]
; CHECK: block_6:
; CHECK-NEXT: ret void
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[A2VAL_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[B2VAL_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: [[LT_CAST2:%.*]] = bitcast i32* [[ADD_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST2]])
-; CHECK-NEXT: [[LT_CAST3:%.*]] = bitcast i32* [[MUL_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST3]])
-; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[OUTPUT]], i32* [[RESULT]], i32* [[A2VAL_LOC]], i32* [[B2VAL_LOC]], i32* [[ADD_LOC]], i32* [[MUL_LOC]], i32 0)
-; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, i32* [[A2VAL_LOC]], align 4
-; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, i32* [[B2VAL_LOC]], align 4
-; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, i32* [[ADD_LOC]], align 4
-; CHECK-NEXT: [[MUL_RELOAD:%.*]] = load i32, i32* [[MUL_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST2]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST3]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[MUL_LOC]])
+; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[A2VAL_LOC]], ptr [[B2VAL_LOC]], ptr [[ADD_LOC]], ptr [[MUL_LOC]], i32 0)
+; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, ptr [[A2VAL_LOC]], align 4
+; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, ptr [[B2VAL_LOC]], align 4
+; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
+; CHECK-NEXT: [[MUL_RELOAD:%.*]] = load i32, ptr [[MUL_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[MUL_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[BLOCK_7:%.*]], label [[BLOCK_6:%.*]]
; CHECK: block_6:
; CHECK-NEXT: [[DIFF:%.*]] = sub i32 [[A2VAL_RELOAD]], [[B2VAL_RELOAD]]
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[BLOCK_2_TO_OUTLINE:%.*]]
; CHECK: block_2_to_outline:
-; CHECK-NEXT: [[A2VAL:%.*]] = load i32, i32* [[TMP0:%.*]], align 4
-; CHECK-NEXT: [[B2VAL:%.*]] = load i32, i32* [[TMP1:%.*]], align 4
+; CHECK-NEXT: [[A2VAL:%.*]] = load i32, ptr [[TMP0:%.*]], align 4
+; CHECK-NEXT: [[B2VAL:%.*]] = load i32, ptr [[TMP1:%.*]], align 4
; CHECK-NEXT: [[ADD2:%.*]] = add i32 2, [[A2VAL]]
; CHECK-NEXT: [[MUL2:%.*]] = mul i32 2, [[B2VAL]]
; CHECK-NEXT: br label [[BLOCK_5:%.*]]
; CHECK: block_3:
-; CHECK-NEXT: [[AVAL:%.*]] = load i32, i32* [[TMP0]], align 4
-; CHECK-NEXT: [[BVAL:%.*]] = load i32, i32* [[TMP1]], align 4
+; CHECK-NEXT: [[AVAL:%.*]] = load i32, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[BVAL:%.*]] = load i32, ptr [[TMP1]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 2, [[AVAL]]
; CHECK-NEXT: [[MUL:%.*]] = mul i32 2, [[BVAL]]
; CHECK-NEXT: br label [[BLOCK_4:%.*]]
; CHECK: block_4:
-; CHECK-NEXT: store i32 [[ADD]], i32* [[TMP2:%.*]], align 4
-; CHECK-NEXT: store i32 [[MUL]], i32* [[TMP3:%.*]], align 4
+; CHECK-NEXT: store i32 [[ADD]], ptr [[TMP2:%.*]], align 4
+; CHECK-NEXT: store i32 [[MUL]], ptr [[TMP3:%.*]], align 4
; CHECK-NEXT: br label [[BLOCK_6_EXITSTUB:%.*]]
; CHECK: block_5:
-; CHECK-NEXT: store i32 [[ADD2]], i32* [[TMP2]], align 4
-; CHECK-NEXT: store i32 [[MUL2]], i32* [[TMP3]], align 4
+; CHECK-NEXT: store i32 [[ADD2]], ptr [[TMP2]], align 4
+; CHECK-NEXT: store i32 [[MUL2]], ptr [[TMP3]], align 4
; CHECK-NEXT: br label [[BLOCK_7_EXITSTUB:%.*]]
; CHECK: block_6.exitStub:
; CHECK-NEXT: switch i32 [[TMP8:%.*]], label [[FINAL_BLOCK_1:%.*]] [
; CHECK-NEXT: i32 0, label [[OUTPUT_BLOCK_1_0:%.*]]
; CHECK-NEXT: ]
; CHECK: output_block_1_0:
-; CHECK-NEXT: store i32 [[A2VAL]], i32* [[TMP4:%.*]], align 4
-; CHECK-NEXT: store i32 [[B2VAL]], i32* [[TMP5:%.*]], align 4
+; CHECK-NEXT: store i32 [[A2VAL]], ptr [[TMP4:%.*]], align 4
+; CHECK-NEXT: store i32 [[B2VAL]], ptr [[TMP5:%.*]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_0]]
; CHECK: output_block_1_1:
-; CHECK-NEXT: store i32 [[ADD]], i32* [[TMP6:%.*]], align 4
-; CHECK-NEXT: store i32 [[MUL]], i32* [[TMP7:%.*]], align 4
+; CHECK-NEXT: store i32 [[ADD]], ptr [[TMP6:%.*]], align 4
+; CHECK-NEXT: store i32 [[MUL]], ptr [[TMP7:%.*]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_1]]
; CHECK: final_block_0:
; CHECK-NEXT: ret i1 false
%b2 = alloca i32, align 4
br label %block_2
block_2:
- %a2val = load i32, i32* %a
- %b2val = load i32, i32* %b
+ %a2val = load i32, ptr %a
+ %b2val = load i32, ptr %b
%add2 = add i32 2, %a2val
%mul2 = mul i32 2, %b2val
br label %block_5
block_3:
- %aval = load i32, i32* %a
- %bval = load i32, i32* %b
+ %aval = load i32, ptr %a
+ %bval = load i32, ptr %b
%add = add i32 2, %aval
%mul = mul i32 2, %bval
br label %block_4
block_4:
- store i32 %add, i32* %output, align 4
- store i32 %mul, i32* %result, align 4
+ store i32 %add, ptr %output, align 4
+ store i32 %mul, ptr %result, align 4
br label %block_6
block_5:
- store i32 %add2, i32* %output, align 4
- store i32 %mul2, i32* %result, align 4
+ store i32 %add2, ptr %output, align 4
+ store i32 %mul2, ptr %result, align 4
br label %block_7
block_6:
%div = udiv i32 %aval, %bval
%b2 = alloca i32, align 4
br label %block_2
block_2:
- %a2val = load i32, i32* %a
- %b2val = load i32, i32* %b
+ %a2val = load i32, ptr %a
+ %b2val = load i32, ptr %b
%add2 = add i32 2, %a2val
%mul2 = mul i32 2, %b2val
br label %block_5
block_3:
- %aval = load i32, i32* %a
- %bval = load i32, i32* %b
+ %aval = load i32, ptr %a
+ %bval = load i32, ptr %b
%add = add i32 2, %aval
%mul = mul i32 2, %bval
br label %block_4
block_4:
- store i32 %add, i32* %output, align 4
- store i32 %mul, i32* %result, align 4
+ store i32 %add, ptr %output, align 4
+ store i32 %mul, ptr %result, align 4
br label %block_7
block_5:
- store i32 %add2, i32* %output, align 4
- store i32 %mul2, i32* %result, align 4
+ store i32 %add2, ptr %output, align 4
+ store i32 %mul2, ptr %result, align 4
br label %block_6
block_6:
%diff = sub i32 %a2val, %b2val
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[A2VAL_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[B2VAL_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: [[LT_CAST2:%.*]] = bitcast i32* [[AVAL_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST2]])
-; CHECK-NEXT: [[LT_CAST3:%.*]] = bitcast i32* [[BVAL_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST3]])
-; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[OUTPUT]], i32* [[RESULT]], i32* [[A2VAL_LOC]], i32* [[B2VAL_LOC]], i32* [[AVAL_LOC]], i32* [[BVAL_LOC]])
-; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, i32* [[A2VAL_LOC]], align 4
-; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, i32* [[B2VAL_LOC]], align 4
-; CHECK-NEXT: [[AVAL_RELOAD:%.*]] = load i32, i32* [[AVAL_LOC]], align 4
-; CHECK-NEXT: [[BVAL_RELOAD:%.*]] = load i32, i32* [[BVAL_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST2]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST3]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[BVAL_LOC]])
+; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[A2VAL_LOC]], ptr [[B2VAL_LOC]], ptr [[AVAL_LOC]], ptr [[BVAL_LOC]])
+; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, ptr [[A2VAL_LOC]], align 4
+; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, ptr [[B2VAL_LOC]], align 4
+; CHECK-NEXT: [[AVAL_RELOAD:%.*]] = load i32, ptr [[AVAL_LOC]], align 4
+; CHECK-NEXT: [[BVAL_RELOAD:%.*]] = load i32, ptr [[BVAL_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[BVAL_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[BLOCK_6:%.*]], label [[BLOCK_7:%.*]]
; CHECK: block_6:
; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[AVAL_RELOAD]], [[BVAL_RELOAD]]
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[A2VAL_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[B2VAL_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: [[LT_CAST2:%.*]] = bitcast i32* [[AVAL_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST2]])
-; CHECK-NEXT: [[LT_CAST3:%.*]] = bitcast i32* [[BVAL_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST3]])
-; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[OUTPUT]], i32* [[RESULT]], i32* [[A2VAL_LOC]], i32* [[B2VAL_LOC]], i32* [[AVAL_LOC]], i32* [[BVAL_LOC]])
-; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, i32* [[A2VAL_LOC]], align 4
-; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, i32* [[B2VAL_LOC]], align 4
-; CHECK-NEXT: [[AVAL_RELOAD:%.*]] = load i32, i32* [[AVAL_LOC]], align 4
-; CHECK-NEXT: [[BVAL_RELOAD:%.*]] = load i32, i32* [[BVAL_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST2]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST3]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[BVAL_LOC]])
+; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[A2VAL_LOC]], ptr [[B2VAL_LOC]], ptr [[AVAL_LOC]], ptr [[BVAL_LOC]])
+; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, ptr [[A2VAL_LOC]], align 4
+; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, ptr [[B2VAL_LOC]], align 4
+; CHECK-NEXT: [[AVAL_RELOAD:%.*]] = load i32, ptr [[AVAL_LOC]], align 4
+; CHECK-NEXT: [[BVAL_RELOAD:%.*]] = load i32, ptr [[BVAL_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[BVAL_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[BLOCK_7:%.*]], label [[BLOCK_6:%.*]]
; CHECK: block_6:
; CHECK-NEXT: [[DIFF:%.*]] = sub i32 [[A2VAL_RELOAD]], [[B2VAL_RELOAD]]
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[BLOCK_2_TO_OUTLINE:%.*]]
; CHECK: block_2_to_outline:
-; CHECK-NEXT: [[A2VAL:%.*]] = load i32, i32* [[TMP0:%.*]], align 4
-; CHECK-NEXT: [[B2VAL:%.*]] = load i32, i32* [[TMP1:%.*]], align 4
+; CHECK-NEXT: [[A2VAL:%.*]] = load i32, ptr [[TMP0:%.*]], align 4
+; CHECK-NEXT: [[B2VAL:%.*]] = load i32, ptr [[TMP1:%.*]], align 4
; CHECK-NEXT: [[ADD2:%.*]] = add i32 2, [[A2VAL]]
; CHECK-NEXT: [[MUL2:%.*]] = mul i32 2, [[B2VAL]]
; CHECK-NEXT: br label [[BLOCK_5:%.*]]
; CHECK: block_3:
-; CHECK-NEXT: [[AVAL:%.*]] = load i32, i32* [[TMP0]], align 4
-; CHECK-NEXT: [[BVAL:%.*]] = load i32, i32* [[TMP1]], align 4
+; CHECK-NEXT: [[AVAL:%.*]] = load i32, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[BVAL:%.*]] = load i32, ptr [[TMP1]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 2, [[AVAL]]
; CHECK-NEXT: [[MUL:%.*]] = mul i32 2, [[BVAL]]
; CHECK-NEXT: br label [[BLOCK_4:%.*]]
; CHECK: block_4:
-; CHECK-NEXT: store i32 [[ADD]], i32* [[TMP2:%.*]], align 4
-; CHECK-NEXT: store i32 [[MUL]], i32* [[TMP3:%.*]], align 4
+; CHECK-NEXT: store i32 [[ADD]], ptr [[TMP2:%.*]], align 4
+; CHECK-NEXT: store i32 [[MUL]], ptr [[TMP3:%.*]], align 4
; CHECK-NEXT: br label [[BLOCK_6_EXITSTUB:%.*]]
; CHECK: block_5:
-; CHECK-NEXT: store i32 [[ADD2]], i32* [[TMP2]], align 4
-; CHECK-NEXT: store i32 [[MUL2]], i32* [[TMP3]], align 4
+; CHECK-NEXT: store i32 [[ADD2]], ptr [[TMP2]], align 4
+; CHECK-NEXT: store i32 [[MUL2]], ptr [[TMP3]], align 4
; CHECK-NEXT: br label [[BLOCK_7_EXITSTUB:%.*]]
; CHECK: block_6.exitStub:
-; CHECK-NEXT: store i32 [[AVAL]], i32* [[TMP6:%.*]], align 4
-; CHECK-NEXT: store i32 [[BVAL]], i32* [[TMP7:%.*]], align 4
+; CHECK-NEXT: store i32 [[AVAL]], ptr [[TMP6:%.*]], align 4
+; CHECK-NEXT: store i32 [[BVAL]], ptr [[TMP7:%.*]], align 4
; CHECK-NEXT: ret i1 true
; CHECK: block_7.exitStub:
-; CHECK-NEXT: store i32 [[A2VAL]], i32* [[TMP4:%.*]], align 4
-; CHECK-NEXT: store i32 [[B2VAL]], i32* [[TMP5:%.*]], align 4
+; CHECK-NEXT: store i32 [[A2VAL]], ptr [[TMP4:%.*]], align 4
+; CHECK-NEXT: store i32 [[B2VAL]], ptr [[TMP5:%.*]], align 4
; CHECK-NEXT: ret i1 false
;
define void @f1() {
%a = alloca i32, align 4
- store i32 2, i32* %a, align 4
+ store i32 2, ptr %a, align 4
musttail call void @musttail()
ret void
}
define void @f2() {
%a = alloca i32, align 4
- store i32 2, i32* %a, align 4
+ store i32 2, ptr %a, align 4
musttail call void @musttail()
ret void
}
; CHECK-LABEL: @f1(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 2, i32* [[A]], align 4
+; CHECK-NEXT: store i32 2, ptr [[A]], align 4
; CHECK-NEXT: musttail call void @musttail()
; CHECK-NEXT: ret void
;
;
; CHECK-LABEL: @f2(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 2, i32* [[A]], align 4
+; CHECK-NEXT: store i32 2, ptr [[A]], align 4
; CHECK-NEXT: musttail call void @musttail()
; CHECK-NEXT: ret void
;
; ODR-NEXT: [[A:%.*]] = alloca i32, align 4
; ODR-NEXT: [[B:%.*]] = alloca i32, align 4
; ODR-NEXT: [[C:%.*]] = alloca i32, align 4
-; ODR-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]])
+; ODR-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; ODR-NEXT: ret void
; CHECK-LABEL: @outline_odr1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 2, i32* [[A]], align 4
-; CHECK-NEXT: store i32 3, i32* [[B]], align 4
-; CHECK-NEXT: store i32 4, i32* [[C]], align 4
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[B]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[C]], align 4
+; CHECK-NEXT: store i32 2, ptr [[A]], align 4
+; CHECK-NEXT: store i32 3, ptr [[B]], align 4
+; CHECK-NEXT: store i32 4, ptr [[C]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[B]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[C]], align 4
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; ODR-NEXT: [[A:%.*]] = alloca i32, align 4
; ODR-NEXT: [[B:%.*]] = alloca i32, align 4
; ODR-NEXT: [[C:%.*]] = alloca i32, align 4
-; ODR-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[C]])
+; ODR-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[C]])
; ODR-NEXT: ret void
; CHECK-LABEL: @outline_odr2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 2, i32* [[A]], align 4
-; CHECK-NEXT: store i32 3, i32* [[B]], align 4
-; CHECK-NEXT: store i32 4, i32* [[C]], align 4
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[B]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[C]], align 4
+; CHECK-NEXT: store i32 2, ptr [[A]], align 4
+; CHECK-NEXT: store i32 3, ptr [[B]], align 4
+; CHECK-NEXT: store i32 4, ptr [[C]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[B]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[C]], align 4
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT2:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT2:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[ADD_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[DOTLOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32 2, i32* [[A]], i32* [[B]], i32* [[OUTPUT]], i32* [[ADD_LOC]], i32* [[DOTLOC]])
-; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, i32* [[ADD_LOC]], align 4
-; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, i32* [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[OUTPUT]], align 4
-; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], i32* [[RESULT]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(i32 2, ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
+; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
+; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[OUTPUT]], align 4
+; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: br label [[NEXT:%.*]]
; CHECK: next:
-; CHECK-NEXT: [[LT_CAST4:%.*]] = bitcast i32* [[ADD2_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST4]])
-; CHECK-NEXT: [[LT_CAST5:%.*]] = bitcast i32* [[DOTLOC2]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST5]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[ADD_RELOAD]], i32* [[OUTPUT]], i32* [[RESULT]], i32* [[OUTPUT2]], i32* [[ADD2_LOC]], i32* [[DOTLOC2]])
-; CHECK-NEXT: [[ADD2_RELOAD:%.*]] = load i32, i32* [[ADD2_LOC]], align 4
-; CHECK-NEXT: [[DOTRELOAD3:%.*]] = load i32, i32* [[DOTLOC2]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST4]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST5]])
-; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD3]], i32 [[ADD2_RELOAD]], i32* [[RESULT2]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD2_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC2]])
+; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[ADD_RELOAD]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[OUTPUT2]], ptr [[ADD2_LOC]], ptr [[DOTLOC2]])
+; CHECK-NEXT: [[ADD2_RELOAD:%.*]] = load i32, ptr [[ADD2_LOC]], align 4
+; CHECK-NEXT: [[DOTRELOAD3:%.*]] = load i32, ptr [[DOTLOC2]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD2_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC2]])
+; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD3]], i32 [[ADD2_RELOAD]], ptr [[RESULT2]])
; CHECK-NEXT: ret void
;
entry:
%result = alloca i32, align 4
%output2 = alloca i32, align 4
%result2 = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- %0 = load i32, i32* %a, align 4
- %1 = load i32, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ %0 = load i32, ptr %a, align 4
+ %1 = load i32, ptr %b, align 4
%add = add i32 %0, %1
- store i32 %add, i32* %output, align 4
- %2 = load i32, i32* %output, align 4
- %3 = load i32, i32* %output, align 4
+ store i32 %add, ptr %output, align 4
+ %2 = load i32, ptr %output, align 4
+ %3 = load i32, ptr %output, align 4
%mul = mul i32 %2, %add
- store i32 %mul, i32* %result, align 4
+ store i32 %mul, ptr %result, align 4
br label %next
next:
- store i32 %add, i32* %output, align 4
- store i32 3, i32* %result, align 4
- %4 = load i32, i32* %output, align 4
- %5 = load i32, i32* %result, align 4
+ store i32 %add, ptr %output, align 4
+ store i32 3, ptr %result, align 4
+ %4 = load i32, ptr %output, align 4
+ %5 = load i32, ptr %result, align 4
%add2 = add i32 %4, %5
- store i32 %add2, i32* %output2, align 4
- %6 = load i32, i32* %output2, align 4
+ store i32 %add2, ptr %output2, align 4
+ %6 = load i32, ptr %output2, align 4
%mul2 = mul i32 %6, %add2
- store i32 %mul2, i32* %result2, align 4
+ store i32 %mul2, ptr %result2, align 4
ret void
}
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @[[FUNCTION_0:.*]](i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @[[FUNCTION_0:.*]](ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @[[FUNCTION_0]](i32* [[A]], i32* [[B]], i32* [[C]])
+; CHECK-NEXT: call void @[[FUNCTION_0]](ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- store i32 4, i32* %c, align 4
- %al = load i32, i32* %a
- %bl = load i32, i32* %b
- %cl = load i32, i32* %c
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ store i32 4, ptr %c, align 4
+ %al = load i32, ptr %a
+ %bl = load i32, ptr %b
+ %cl = load i32, ptr %c
ret void
}
-; CHECK: define internal void @[[FUNCTION_0]](i32* [[ARG0:%.*]], i32* [[ARG1:%.*]], i32* [[ARG2:%.*]])
+; CHECK: define internal void @[[FUNCTION_0]](ptr [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]])
; CHECK: entry_to_outline:
-; CHECK-NEXT: store i32 2, i32* [[ARG0]], align 4
-; CHECK-NEXT: store i32 3, i32* [[ARG1]], align 4
-; CHECK-NEXT: store i32 4, i32* [[ARG2]], align 4
-; CHECK-NEXT: [[AL:%.*]] = load i32, i32* [[ARG0]], align 4
-; CHECK-NEXT: [[BL:%.*]] = load i32, i32* [[ARG1]], align 4
-; CHECK-NEXT: [[CL:%.*]] = load i32, i32* [[ARG2]], align 4
+; CHECK-NEXT: store i32 2, ptr [[ARG0]], align 4
+; CHECK-NEXT: store i32 3, ptr [[ARG1]], align 4
+; CHECK-NEXT: store i32 4, ptr [[ARG2]], align 4
+; CHECK-NEXT: [[AL:%.*]] = load i32, ptr [[ARG0]], align 4
+; CHECK-NEXT: [[BL:%.*]] = load i32, ptr [[ARG1]], align 4
+; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[ARG2]], align 4
; CHECK-NEXT: ret void
;
entry:
- %0 = load i32, i32* @global1
- %1 = load i32, i32* @global2
+ %0 = load i32, ptr @global1
+ %1 = load i32, ptr @global2
%2 = add i32 %0, %1
ret void
}
; CHECK-NEXT: ret void
;
entry:
- %0 = load i32, i32* @global1
- %1 = load i32, i32* @global2
+ %0 = load i32, ptr @global1
+ %1 = load i32, ptr @global2
%2 = add i32 %0, %1
ret void
}
; CHECK: define internal void @outlined_ir_func_0()
; CHECK: entry_to_outline:
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* @global1, align 4
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* @global2, align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @global1, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @global2, align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[ADD_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[DOTLOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[OUTPUT]], i32* [[ADD_LOC]], i32* [[DOTLOC]])
-; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, i32* [[ADD_LOC]], align 4
-; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, i32* [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[OUTPUT]], align 4
-; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], i32* [[RESULT]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
+; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
+; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[OUTPUT]], align 4
+; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
;
entry:
%b = alloca i32, align 4
%output = alloca i32, align 4
%result = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- %0 = load i32, i32* %a, align 4
- %1 = load i32, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ %0 = load i32, ptr %a, align 4
+ %1 = load i32, ptr %b, align 4
%add = add i32 %0, %1
- store i32 %add, i32* %output, align 4
- %2 = load i32, i32* %output, align 4
- %3 = load i32, i32* %output, align 4
+ store i32 %add, ptr %output, align 4
+ %2 = load i32, ptr %output, align 4
+ %3 = load i32, ptr %output, align 4
%mul = mul i32 %2, %add
- store i32 %mul, i32* %result, align 4
+ store i32 %mul, ptr %result, align 4
ret void
}
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[ADD_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[DOTLOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A]], i32* [[B]], i32* [[OUTPUT]], i32* [[ADD_LOC]], i32* [[DOTLOC]])
-; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, i32* [[ADD_LOC]], align 4
-; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, i32* [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], i32* [[RESULT]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
+; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
+; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
;
entry:
%b = alloca i32, align 4
%output = alloca i32, align 4
%result = alloca i32, align 4
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
- %0 = load i32, i32* %a, align 4
- %1 = load i32, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
+ %0 = load i32, ptr %a, align 4
+ %1 = load i32, ptr %b, align 4
%add = add i32 %0, %1
- store i32 %add, i32* %output, align 4
- %2 = load i32, i32* %output, align 4
+ store i32 %add, ptr %output, align 4
+ %2 = load i32, ptr %output, align 4
%mul = mul i32 %2, %add
- store i32 %mul, i32* %result, align 4
+ store i32 %mul, ptr %result, align 4
ret void
}
-; CHECK: define internal void @outlined_ir_func_0(i32* [[ARG0:%.*]], i32* [[ARG1:%.*]], i32* [[ARG2:%.*]], i32* [[ARG3:%.*]], i32* [[ARG4:%.*]]) #1 {
+; CHECK: define internal void @outlined_ir_func_0(ptr [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]], ptr [[ARG3:%.*]], ptr [[ARG4:%.*]]) #1 {
; CHECK: entry_to_outline:
-; CHECK-NEXT: store i32 2, i32* [[ARG0]], align 4
-; CHECK-NEXT: store i32 3, i32* [[ARG1]], align 4
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARG0]], align 4
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARG1]], align 4
+; CHECK-NEXT: store i32 2, ptr [[ARG0]], align 4
+; CHECK-NEXT: store i32 3, ptr [[ARG1]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARG0]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARG1]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP0]], [[TMP1]]
-; CHECK-NEXT: store i32 [[ADD]], i32* [[ARG2]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARG2]], align 4
+; CHECK-NEXT: store i32 [[ADD]], ptr [[ARG2]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARG2]], align 4
; CHECK: entry_after_outline.exitStub:
-; CHECK-NEXT: store i32 [[ADD]], i32* [[ARG3]], align 4
-; CHECK-NEXT: store i32 [[TMP2]], i32* [[ARG4]], align 4
+; CHECK-NEXT: store i32 [[ADD]], ptr [[ARG3]], align 4
+; CHECK-NEXT: store i32 [[TMP2]], ptr [[ARG4]], align 4
%swift.error = type opaque
-define void @outlining_swifterror1(%swift.error** swifterror %err) {
+define void @outlining_swifterror1(ptr swifterror %err) {
; CHECK-LABEL: @outlining_swifterror1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[X:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @outlined_ir_func_0(i64 5, i64* [[X]], %swift.error** swifterror [[ERR:%.*]])
+; CHECK-NEXT: call void @outlined_ir_func_0(i64 5, ptr [[X]], ptr swifterror [[ERR:%.*]])
; CHECK-NEXT: ret void
;
entry:
%x = alloca i64
%0 = mul i64 5, 5
%1 = add i64 %0, %0
- store i64 %1, i64* %x
- %casted = bitcast i64* %x to %swift.error*
- store %swift.error* %casted, %swift.error** %err
+ store i64 %1, ptr %x
+ store ptr %x, ptr %err
ret void
}
-define void @outlining_swifterror2(%swift.error** swifterror %err) {
+define void @outlining_swifterror2(ptr swifterror %err) {
; CHECK-LABEL: @outlining_swifterror2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[X:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @outlined_ir_func_0(i64 3, i64* [[X]], %swift.error** swifterror [[ERR:%.*]])
+; CHECK-NEXT: call void @outlined_ir_func_0(i64 3, ptr [[X]], ptr swifterror [[ERR:%.*]])
; CHECK-NEXT: ret void
;
entry:
%x = alloca i64
%0 = mul i64 3, 3
%1 = add i64 %0, %0
- store i64 %1, i64* %x
- %casted = bitcast i64* %x to %swift.error*
- store %swift.error* %casted, %swift.error** %err
+ store i64 %1, ptr %x
+ store ptr %x, ptr %err
ret void
}
-; CHECK: define internal void @outlined_ir_func_0(i64 [[ARG0:%.*]], i64* [[ARG1:%.*]], %swift.error** swifterror [[ARG2:%.*]])
+; CHECK: define internal void @outlined_ir_func_0(i64 [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr swifterror [[ARG2:%.*]])
; CHECK: entry_to_outline:
; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[ARG0]], [[ARG0]]
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], [[TMP0]]
-; CHECK-NEXT: store i64 [[TMP1]], i64* [[ARG1]], align 4
-; CHECK-NEXT: %casted = bitcast i64* [[ARG1]] to %swift.error*
-; CHECK-NEXT: store %swift.error* %casted, %swift.error** [[ARG2]], align 8
+; CHECK-NEXT: store i64 [[TMP1]], ptr [[ARG1]], align 4
+; CHECK-NEXT: store ptr [[ARG1]], ptr [[ARG2]], align 8
; CHECK-NEXT: br label %entry_after_outline.exitStub
define swifttailcc void @f1() {
%a = alloca i32, align 4
- store i32 2, i32* %a, align 4
+ store i32 2, ptr %a, align 4
musttail call swifttailcc void @musttail()
ret void
}
define swifttailcc void @f2() {
%a = alloca i32, align 4
- store i32 2, i32* %a, align 4
+ store i32 2, ptr %a, align 4
musttail call swifttailcc void @musttail()
ret void
}
; CHECK-LABEL: @f1(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 2, i32* [[A]], align 4
+; CHECK-NEXT: store i32 2, ptr [[A]], align 4
; CHECK-NEXT: musttail call swifttailcc void @musttail()
; CHECK-NEXT: ret void
;
;
; CHECK-LABEL: @f2(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 2, i32* [[A]], align 4
+; CHECK-NEXT: store i32 2, ptr [[A]], align 4
; CHECK-NEXT: musttail call swifttailcc void @musttail()
; CHECK-NEXT: ret void
;
define tailcc void @f1() {
%a = alloca i32, align 4
- store i32 2, i32* %a, align 4
+ store i32 2, ptr %a, align 4
musttail call tailcc void @musttail()
ret void
}
define tailcc void @f2() {
%a = alloca i32, align 4
- store i32 2, i32* %a, align 4
+ store i32 2, ptr %a, align 4
musttail call tailcc void @musttail()
ret void
}
; CHECK-LABEL: @f1(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 2, i32* [[A]], align 4
+; CHECK-NEXT: store i32 2, ptr [[A]], align 4
; CHECK-NEXT: musttail call tailcc void @musttail()
; CHECK-NEXT: ret void
;
;
; CHECK-LABEL: @f2(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 2, i32* [[A]], align 4
+; CHECK-NEXT: store i32 2, ptr [[A]], align 4
; CHECK-NEXT: musttail call tailcc void @musttail()
; CHECK-NEXT: ret void
;
; Show that we do extract phi nodes from the regions.
; Outlining candidate: three loads through one stack slot across blocks
; %entry/%test1/%test feed a phi in %first, followed by two stores. The body
; is shape-identical to @function2 below, so the pair can be outlined into a
; single function (phi included), per the comment above.
-define void @function1(i32* %a, i32* %b) {
+define void @function1(ptr %a, ptr %b) {
entry:
 %0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
 br label %test1
test1:
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
 br label %first
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
 br label %first
first:
 %1 = phi i32 [ %c, %test ], [ %e, %test1 ]
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
 ret void
}
; Shape-identical twin of @function1 above; together they form the region the
; outliner extracts into @outlined_ir_func_0 (see the CHECK lines below).
-define void @function2(i32* %a, i32* %b) {
+define void @function2(ptr %a, ptr %b) {
entry:
 %0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
 br label %test1
test1:
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
 br label %first
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
 br label %first
first:
 %1 = phi i32 [ %c, %test ], [ %e, %test1 ]
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
 ret void
}
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[TMP0]], i32* [[A:%.*]], i32* [[B:%.*]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], ptr [[A:%.*]], ptr [[B:%.*]])
; CHECK-NEXT: ret void
;
;
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[TMP0]], i32* [[A:%.*]], i32* [[B:%.*]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], ptr [[A:%.*]], ptr [[B:%.*]])
; CHECK-NEXT: ret void
;
;
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
-; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[TMP0:%.*]], align 4
+; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0:%.*]], align 4
; CHECK-NEXT: br label [[TEST1:%.*]]
; CHECK: test1:
-; CHECK-NEXT: [[E:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[E:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: test:
-; CHECK-NEXT: [[D:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[D:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: br label [[FIRST]]
; CHECK: first:
; CHECK-NEXT: [[TMP3:%.*]] = phi i32 [ [[C]], [[TEST:%.*]] ], [ [[E]], [[TEST1]] ]
-; CHECK-NEXT: store i32 2, i32* [[TMP1:%.*]], align 4
-; CHECK-NEXT: store i32 3, i32* [[TMP2:%.*]], align 4
+; CHECK-NEXT: store i32 2, ptr [[TMP1:%.*]], align 4
+; CHECK-NEXT: store i32 3, ptr [[TMP2:%.*]], align 4
; CHECK-NEXT: br label [[ENTRY_AFTER_OUTLINE_EXITSTUB:%.*]]
; CHECK: entry_after_outline.exitStub:
; CHECK-NEXT: ret void
; Show that we do not extract similar regions that would involve the splitting
; of phi nodes on exit.
; Exit-phi case: %first has two conditional predecessors (%test1, %test), so
; outlining the region would require splitting the phi at the exit block.
; NOTE(review): label %next is not defined in this excerpt — presumably it
; exists in the full test file; confirm there.
-define void @function1(i32* %a, i32* %b) {
+define void @function1(ptr %a, ptr %b) {
entry:
 %0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
 br label %test1
test1:
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
 br i1 true, label %first, label %next
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
 br i1 true, label %first, label %next
first:
 %1 = phi i32 [ %c, %test ], [ %e, %test1 ]
 ret void
}
; Twin of @function1 above but with no phi in %first, making the regions only
; similar, not identical, at the exit. NOTE(review): the closing brace and the
; %next block are not visible at this point in the excerpt — the function
; appears truncated by the diff concatenation; verify against the full file.
-define void @function2(i32* %a, i32* %b) {
+define void @function2(ptr %a, ptr %b) {
entry:
 %0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
 br label %test1
test1:
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
 br i1 true, label %first, label %next
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
 br i1 true, label %first, label %next
first:
 ret void
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[DOTCE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(i32* [[TMP0]], i32* [[DOTCE_LOC]], i32 0)
-; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, i32* [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]], i32 0)
+; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[DOTCE_RELOAD]], [[ENTRY:%.*]] ]
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[DOTCE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(i32* [[TMP0]], i32* [[DOTCE_LOC]], i32 1)
-; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, i32* [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]], i32 1)
+; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
-; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[TMP0:%.*]], align 4
+; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0:%.*]], align 4
; CHECK-NEXT: br label [[TEST1:%.*]]
; CHECK: test1:
-; CHECK-NEXT: [[E:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[E:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: br i1 true, label [[FIRST_SPLIT:%.*]], label [[PHI_BLOCK:%.*]]
; CHECK: test:
-; CHECK-NEXT: [[D:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[D:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: br i1 true, label [[FIRST_SPLIT]], label [[PHI_BLOCK]]
; CHECK: first.split:
; CHECK-NEXT: [[DOTCE:%.*]] = phi i32 [ [[C]], [[TEST:%.*]] ], [ [[E]], [[TEST1]] ]
; CHECK-NEXT: i32 0, label [[OUTPUT_BLOCK_1_0:%.*]]
; CHECK-NEXT: ]
; CHECK: output_block_0_1:
-; CHECK-NEXT: store i32 [[DOTCE]], i32* [[TMP1:%.*]], align 4
+; CHECK-NEXT: store i32 [[DOTCE]], ptr [[TMP1:%.*]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_1]]
; CHECK: output_block_1_0:
-; CHECK-NEXT: store i32 [[TMP3:%.*]], i32* [[TMP1]], align 4
+; CHECK-NEXT: store i32 [[TMP3:%.*]], ptr [[TMP1]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_0]]
; CHECK: phi_block:
; CHECK-NEXT: [[TMP3]] = phi i32 [ [[C]], [[TEST]] ], [ [[E]], [[TEST1]] ]
; Show that we do not outline when all of the phi nodes in the beginning
; block are not included in the region.
; Entry-block-phi case: %test1 begins with two phis fed by %entry and by its
; own back edge, so the candidate region cannot start past them.
; NOTE(review): the empty `dummy:` block and the `ret void` following the
; terminating `br` look like artifacts of this concatenated diff view rather
; than valid standalone IR — confirm against the original test file.
-define void @function1(i32* %a, i32* %b) {
+define void @function1(ptr %a, ptr %b) {
entry:
 %0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
 %y = add i32 %c, %c
 br label %test1
dummy:
test1:
 %1 = phi i32 [ %e, %test1 ], [ %y, %entry ]
 %2 = phi i32 [ %e, %test1 ], [ %y, %entry ]
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
 %3 = add i32 %c, %c
 %4 = sub i32 %c, %c
 br i1 true, label %first, label %test1
 ret void
}
; Twin of @function1 above, differing by mul-vs-add/sub and by swapped phi
; incoming order in %2 — similar but not identical, exercising the
; entry-block-phi restriction. NOTE(review): the closing brace is not visible
; here; the function appears truncated by the diff concatenation.
-define void @function2(i32* %a, i32* %b) {
+define void @function2(ptr %a, ptr %b) {
entry:
 %0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
 %y = mul i32 %c, %c
 br label %test1
dummy:
test1:
 %1 = phi i32 [ %e, %test1 ], [ %y, %entry ]
 %2 = phi i32 [ %y, %entry ], [ %e, %test1 ]
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
 %3 = add i32 %c, %c
 %4 = mul i32 %c, %c
 br i1 true, label %first, label %test1
; CHECK-NEXT: entry:
; CHECK-NEXT: [[E_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: [[Y:%.*]] = add i32 [[C]], [[C]]
; CHECK-NEXT: br label [[TEST1:%.*]]
; CHECK: dummy:
; CHECK: test1:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[E_RELOAD:%.*]], [[TEST1]] ], [ [[Y]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[E_RELOAD]], [[TEST1]] ], [ [[Y]], [[ENTRY]] ]
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[E_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[TMP0]], i32 [[C]], i32* [[E_LOC]])
-; CHECK-NEXT: [[E_RELOAD]] = load i32, i32* [[E_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[E_LOC]])
+; CHECK-NEXT: [[E_RELOAD]] = load i32, ptr [[E_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
; CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[C]], [[C]]
; CHECK-NEXT: br i1 true, label [[FIRST:%.*]], label [[TEST1]]
; CHECK: first:
; CHECK-NEXT: entry:
; CHECK-NEXT: [[E_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: [[Y:%.*]] = mul i32 [[C]], [[C]]
; CHECK-NEXT: br label [[TEST1:%.*]]
; CHECK: dummy:
; CHECK: test1:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[E_RELOAD:%.*]], [[TEST1]] ], [ [[Y]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[Y]], [[ENTRY]] ], [ [[E_RELOAD]], [[TEST1]] ]
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[E_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[TMP0]], i32 [[C]], i32* [[E_LOC]])
-; CHECK-NEXT: [[E_RELOAD]] = load i32, i32* [[E_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[E_LOC]])
+; CHECK-NEXT: [[E_RELOAD]] = load i32, ptr [[E_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
; CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[C]], [[C]]
; CHECK-NEXT: br i1 true, label [[FIRST:%.*]], label [[TEST1]]
; CHECK: first:
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[TEST1_TO_OUTLINE:%.*]]
; CHECK: test1_to_outline:
-; CHECK-NEXT: [[E:%.*]] = load i32, i32* [[TMP0:%.*]], align 4
+; CHECK-NEXT: [[E:%.*]] = load i32, ptr [[TMP0:%.*]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP1:%.*]], [[TMP1]]
; CHECK-NEXT: br label [[TEST1_AFTER_OUTLINE_EXITSTUB:%.*]]
; CHECK: test1_after_outline.exitStub:
-; CHECK-NEXT: store i32 [[E]], i32* [[TMP2:%.*]], align 4
+; CHECK-NEXT: store i32 [[E]], ptr [[TMP2:%.*]], align 4
; CHECK-NEXT: ret void
;
; Show that we are able to outline the simple phi node case of constants when
; the corresponding labels match.
; Constant-phi case: %first's phi has a single constant incoming value from a
; matching label, so the region (phi included) is outlinable; identical to
; @function2 below.
-define void @function1(i32* %a, i32* %b) {
+define void @function1(ptr %a, ptr %b) {
entry:
 br label %test
test:
 br label %first
first:
 %0 = phi i32 [ 0, %test ]
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
 ret void
}
; Identical twin of the constant-phi @function1 above; the pair collapses into
; one call to @outlined_ir_func_0 per the CHECK lines below.
-define void @function2(i32* %a, i32* %b) {
+define void @function2(ptr %a, ptr %b) {
entry:
 br label %test
test:
 br label %first
first:
 %0 = phi i32 [ 0, %test ]
- store i32 2, i32* %a, align 4
- store i32 3, i32* %b, align 4
+ store i32 2, ptr %a, align 4
+ store i32 3, ptr %b, align 4
 ret void
}
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A:%.*]], i32* [[B:%.*]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A:%.*]], ptr [[B:%.*]])
; CHECK-NEXT: ret void
;
;
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[A:%.*]], i32* [[B:%.*]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A:%.*]], ptr [[B:%.*]])
; CHECK-NEXT: ret void
;
;
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ 0, [[TEST]] ]
-; CHECK-NEXT: store i32 2, i32* [[TMP0:%.*]], align 4
-; CHECK-NEXT: store i32 3, i32* [[TMP1:%.*]], align 4
+; CHECK-NEXT: store i32 2, ptr [[TMP0:%.*]], align 4
+; CHECK-NEXT: store i32 3, ptr [[TMP1:%.*]], align 4
; CHECK-NEXT: br label [[ENTRY_AFTER_OUTLINE_EXITSTUB:%.*]]
; CHECK: entry_after_outline.exitStub:
; CHECK-NEXT: ret void
; This test checks that we do not fail when there is a similarity group with
; an ending instruction that is also the end of the module.
; Null-initialized pointer global for the end-of-module test described above;
; migrated from i8* to the opaque ptr type.
-@a = global i8* null
+@a = global ptr null
define void @foo() {
entry:
; Show that we are able to propagate inputs to the region into the split PHINode
; outside of the region if necessary.
; Phi-propagation case: %entry can branch straight to %first, so an input to
; the region (%c) must be propagated into the split phi outside the region
; (see [[DOTCE]] incoming from ENTRY_TO_OUTLINE in the CHECK lines below).
; NOTE(review): the doubled `ret void` and missing %first/%next blocks look
; like artifacts of this concatenated diff view — confirm in the full file.
-define void @function1(i32* %a, i32* %b) {
+define void @function1(ptr %a, ptr %b) {
entry:
 %0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
 %z = add i32 %c, %c
 br i1 true, label %test1, label %first
test1:
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
 %1 = add i32 %c, %c
 br i1 true, label %first, label %test
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
 br i1 true, label %first, label %next
dummy:
 ret void
 ret void
}
; Twin of @function1 above, differing only in add-vs-mul for %z, so the two
; regions are similar and share @outlined_ir_func_0. NOTE(review): the closing
; brace is not visible here; the function appears truncated by the diff
; concatenation.
-define void @function2(i32* %a, i32* %b) {
+define void @function2(ptr %a, ptr %b) {
entry:
 %0 = alloca i32, align 4
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
 %z = mul i32 %c, %c
 br i1 true, label %test1, label %first
test1:
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
 %1 = add i32 %c, %c
 br i1 true, label %first, label %test
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
 br i1 true, label %first, label %next
dummy:
 ret void
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: [[Z:%.*]] = add i32 [[C]], [[C]]
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[DOTCE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(i32* [[TMP0]], i32 [[C]], i32* [[DOTCE_LOC]])
-; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, i32* [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[DOTCE_LOC]])
+; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: [[Z:%.*]] = mul i32 [[C]], [[C]]
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[DOTCE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(i32* [[TMP0]], i32 [[C]], i32* [[DOTCE_LOC]])
-; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, i32* [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[DOTCE_LOC]])
+; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
; CHECK: entry_to_outline:
; CHECK-NEXT: br i1 true, label [[TEST1:%.*]], label [[FIRST_SPLIT:%.*]]
; CHECK: test1:
-; CHECK-NEXT: [[E:%.*]] = load i32, i32* [[TMP0:%.*]], align 4
+; CHECK-NEXT: [[E:%.*]] = load i32, ptr [[TMP0:%.*]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP1:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 true, label [[FIRST_SPLIT]], label [[TEST:%.*]]
; CHECK: test:
-; CHECK-NEXT: [[D:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[D:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: br i1 true, label [[FIRST_SPLIT]], label [[NEXT_EXITSTUB:%.*]]
; CHECK: first.split:
; CHECK-NEXT: [[DOTCE:%.*]] = phi i32 [ [[D]], [[TEST]] ], [ [[E]], [[TEST1]] ], [ [[TMP1]], [[ENTRY_TO_OUTLINE]] ]
; CHECK-NEXT: br label [[FIRST_EXITSTUB:%.*]]
; CHECK: first.exitStub:
-; CHECK-NEXT: store i32 [[DOTCE]], i32* [[TMP2:%.*]], align 4
+; CHECK-NEXT: store i32 [[DOTCE]], ptr [[TMP2:%.*]], align 4
; CHECK-NEXT: ret i1 true
; CHECK: next.exitStub:
; CHECK-NEXT: ret i1 false