target triple = "i386-apple-darwin8"
%struct.x = type { [4 x i32] }
-define void @foo(%struct.x* byval align 4 %X) nounwind {
+define void @foo(%struct.x* byval(%struct.x) align 4 %X) nounwind {
; CHECK: store i32 2, i32* %tmp1
entry:
%tmp = getelementptr %struct.x, %struct.x* %X, i32 0, i32 0 ; <[4 x i32]*> [#uses=1]
%tmp1 = getelementptr [4 x i32], [4 x i32]* %tmp, i32 0, i32 3 ; <i32*> [#uses=1]
store i32 2, i32* %tmp1, align 4
- %tmp2 = call i32 (...) @bar( %struct.x* byval align 4 %X ) nounwind ; <i32> [#uses=0]
+ %tmp2 = call i32 (...) @bar( %struct.x* byval(%struct.x) align 4 %X ) nounwind ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
ret void
%struct.x = type { i32, i32, i32, i32 }
@g = weak global i32 0 ; <i32*> [#uses=1]
-define i32 @foo(%struct.x* byval %a) nounwind {
+define i32 @foo(%struct.x* byval(%struct.x) %a) nounwind {
; CHECK: ret i32 1
%tmp1 = tail call i32 (...) @bar( %struct.x* %a ) nounwind ; <i32> [#uses=0]
%tmp2 = getelementptr %struct.x, %struct.x* %a, i32 0, i32 0 ; <i32*> [#uses=2]
ret i32 %tmp
}
-define i32 @byval_and_deref_arg_1(i32* byval %obj, i64* dereferenceable(8) %arg) {
+define i32 @byval_and_deref_arg_1(i32* byval(i32) %obj, i64* dereferenceable(8) %arg) {
; CHECK: Function: byval_and_deref_arg_1: 2 pointers, 0 call sites
; CHECK-NEXT: NoAlias: i32* %obj, i64* %arg
bb:
ret i32 %tmp
}
-define i32 @byval_and_deref_arg_2(i32* byval %obj, i32* dereferenceable(8) %arg) {
+define i32 @byval_and_deref_arg_2(i32* byval(i32) %obj, i32* dereferenceable(8) %arg) {
; CHECK: Function: byval_and_deref_arg_2: 2 pointers, 0 call sites
; CHECK-NEXT: NoAlias: i32* %arg, i32* %obj
bb:
ret i32 %tmp
}
-define i32 @byval_and_deref_arg_non_deref_1(i32* byval %obj, i64* dereferenceable(2) %arg) {
+define i32 @byval_and_deref_arg_non_deref_1(i32* byval(i32) %obj, i64* dereferenceable(2) %arg) {
; CHECK: Function: byval_and_deref_arg_non_deref_1: 2 pointers, 0 call sites
; CHECK-NEXT: NoAlias: i32* %obj, i64* %arg
bb:
ret i32 %tmp
}
-define i32 @byval_and_deref_arg_non_deref_2(i32* byval %obj, i32* dereferenceable(2) %arg) {
+define i32 @byval_and_deref_arg_non_deref_2(i32* byval(i32) %obj, i32* dereferenceable(2) %arg) {
; CHECK: Function: byval_and_deref_arg_non_deref_2: 2 pointers, 0 call sites
; CHECK-NEXT: NoAlias: i32* %arg, i32* %obj
bb:
; RUN: opt -basic-aa -aa-eval -print-all-alias-modref-info -disable-output < %s 2>&1 | FileCheck %s
-declare void @takebyval(i32* byval %p)
+declare void @takebyval(i32* byval(i32) %p)
define i32 @tailbyval() {
entry:
%p = alloca i32
store i32 42, i32* %p
- tail call void @takebyval(i32* byval %p)
+ tail call void @takebyval(i32* byval(i32) %p)
%rv = load i32, i32* %p
ret i32 %rv
}
; FIXME: This should be Just Ref.
; CHECK-LABEL: Function: tailbyval: 1 pointers, 1 call sites
-; CHECK-NEXT: Both ModRef: Ptr: i32* %p <-> tail call void @takebyval(i32* byval %p)
+; CHECK-NEXT: Both ModRef: Ptr: i32* %p <-> tail call void @takebyval(i32* byval(i32) %p)
; CHECK: DIVERGENT:
; CHECK: DIVERGENT:
; CHECK: DIVERGENT:
-define void @test_c([4 x <16 x i8>] addrspace(5)* byval %arg0, float inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <3 x i32> %arg4, float %arg5, i32 %arg6) #0 {
+define void @test_c([4 x <16 x i8>] addrspace(5)* byval([4 x <16 x i8>]) %arg0, float inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <3 x i32> %arg4, float %arg5, i32 %arg6) #0 {
ret void
}
; CHECK: DIVERGENT:
; CHECK: DIVERGENT:
; CHECK: DIVERGENT:
-define void @test_c([4 x <16 x i8>] addrspace(4)* byval %arg0, float inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <3 x i32> %arg4, float %arg5, i32 %arg6) #0 {
+define void @test_c([4 x <16 x i8>] addrspace(4)* byval([4 x <16 x i8>]) %arg0, float inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <3 x i32> %arg4, float %arg5, i32 %arg6) #0 {
ret void
}
; CHECK: Unusual: noalias argument aliases another argument
; CHECK-NEXT: call void @f1(%s* sret %c, %s* %c)
-declare void @f3(%s* noalias nocapture sret, %s* byval nocapture readnone)
+declare void @f3(%s* noalias nocapture sret, %s* byval(%s) nocapture readnone)
define void @f4() {
entry:
%0 = bitcast %s* %c to i8*
%1 = bitcast %s* %tmp to i8*
call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 1, i1 false)
- call void @f3(%s* sret %c, %s* byval %c)
+ call void @f3(%s* sret %c, %s* byval(%s) %c)
ret void
}
; noalias, since the other one is byval, effectively copying the data to the
; stack instead of passing the pointer itself.
; CHECK-NOT: Unusual: noalias argument aliases another argument
-; CHECK-NOT: call void @f3(%s* sret %c, %s* %c)
+; CHECK-NOT: call void @f3(%s* sret %c, %s* byval(%s) %c)
attributes #0 = { argmemonly nounwind }
; CHECK: Undefined behavior: Call with "tail" keyword references alloca
; CHECK-NEXT: tail call void @f1(%s* %c)
-declare void @f3(%s* byval)
+declare void @f3(%s* byval(%s))
define void @f4() {
entry:
%c = alloca %s
- tail call void @f3(%s* byval %c)
+ tail call void @f3(%s* byval(%s) %c)
ret void
}
; byval, effectively copying the data to the stack instead of leaking the
; pointer itself.
; CHECK-NOT: Undefined behavior: Call with "tail" keyword references alloca
-; CHECK-NOT: tail call void @f3(%s* byval %c)
+; CHECK-NOT: tail call void @f3(%s* byval(%s) %c)
ret void
}
-define void @ByVal(i16* byval %p) {
+define void @ByVal(i16* byval(i16) %p) {
; CHECK-LABEL: @ByVal dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-EMPTY:
entry:
%x = alloca i16, align 4
- call void @ByVal(i16* byval %x)
+ call void @ByVal(i16* byval(i16) %x)
%y = alloca i64, align 4
%y1 = bitcast i64* %y to i16*
- call void @ByVal(i16* byval %y1)
-
+ call void @ByVal(i16* byval(i16) %y1)
+
ret void
}
-declare void @ByValArray([100000 x i64]* byval %p)
+declare void @ByValArray([100000 x i64]* byval([100000 x i64]) %p)
define void @TestByValArray() {
; CHECK-LABEL: @TestByValArray dso_preemptable{{$}}
%z1 = bitcast [100000 x i64]* %z to i8*
%z2 = getelementptr i8, i8* %z1, i64 500000
%z3 = bitcast i8* %z2 to [100000 x i64]*
- call void @ByValArray([100000 x i64]* byval %z3)
+ call void @ByValArray([100000 x i64]* byval([100000 x i64]) %z3)
ret void
}
i32 addrspace(1)* dereferenceable(8) %dparam,
i8 addrspace(1)* dereferenceable(32) align 1 %dparam.align1,
i8 addrspace(1)* dereferenceable(32) align 16 %dparam.align16,
- i8* byval %i8_byval,
- %struct.A* byval %A_byval)
+ i8* byval(i8) %i8_byval,
+ %struct.A* byval(%struct.A) %A_byval)
gc "statepoint-example" {
; CHECK: The following are dereferenceable:
entry:
define i32 @f_0(i32 %val) {
%ptr = inttoptr i32 %val to i32*, !dereferenceable !0
%load29 = load i32, i32* %ptr, align 8
- ret i32 %load29
+ ret i32 %load29
}
; Just check that we don't crash.
declare void @foo(...)
define void @bar() {
- call void (...) @foo(%struct* byval null )
+ call void (...) @foo(%struct* byval(%struct) null )
ret void
}
define void @caller({ i32*, i8 }* %ptr) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
; CHECK: call void @bar({ i32*, i8 }* byval({ i32*, i8 }) %ptr)
; CHECK: invoke void @bar({ i32*, i8 }* byval({ i32*, i8 }) %ptr)
- call void @bar({i32*, i8}* byval %ptr)
- invoke void @bar({i32*, i8}* byval %ptr) to label %success unwind label %fail
+ call void @bar({i32*, i8}* byval({i32*, i8}) %ptr)
+ invoke void @bar({i32*, i8}* byval({i32*, i8}) %ptr) to label %success unwind label %fail
success:
ret void
; RUN: not llvm-as < %s -o /dev/null 2>&1 | FileCheck %s
; CHECK: Attribute 'immarg' is incompatible with other attributes
-declare void @llvm.immarg.byval(i32* byval immarg)
+declare void @llvm.immarg.byval(i32* byval(i32) immarg)
; CHECK: Attribute 'immarg' is incompatible with other attributes
declare void @llvm.immarg.inalloca(i32* inalloca immarg)
ret void;
}
-define void @f8(i8* byval %0)
+define void @f8(i8* byval(i8) %0)
; CHECK: define void @f8(i8* byval(i8) %0)
{
ret void;
ret void;
}
-define void @f8(i8* byval %0)
+define void @f8(i8* byval(i8) %0)
; CHECK: define void @f8(i8* byval(i8) %0)
{
ret void;
; CHECK: declare void @f.param.signext(i8 signext)
declare void @f.param.inreg(i8 inreg)
; CHECK: declare void @f.param.inreg(i8 inreg)
-declare void @f.param.byval({ i8, i8 }* byval)
+declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
declare void @f.param.inalloca(i8* inalloca)
; CHECK: declare void @f.param.inalloca(i8* inalloca)
; CHECK: declare void @f.param.signext(i8 signext)
declare void @f.param.inreg(i8 inreg)
; CHECK: declare void @f.param.inreg(i8 inreg)
-declare void @f.param.byval({ i8, i8 }* byval)
+declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
declare void @f.param.inalloca(i8* inalloca)
; CHECK: declare void @f.param.inalloca(i8* inalloca)
; CHECK: declare void @f.param.signext(i8 signext)
declare void @f.param.inreg(i8 inreg)
; CHECK: declare void @f.param.inreg(i8 inreg)
-declare void @f.param.byval({ i8, i8 }* byval)
+declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
declare void @f.param.inalloca(i8* inalloca)
; CHECK: declare void @f.param.inalloca(i8* inalloca)
; CHECK: declare void @f.param.signext(i8 signext)
declare void @f.param.inreg(i8 inreg)
; CHECK: declare void @f.param.inreg(i8 inreg)
-declare void @f.param.byval({ i8, i8 }* byval)
+declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
declare void @f.param.inalloca(i8* inalloca)
; CHECK: declare void @f.param.inalloca(i8* inalloca)
; CHECK: declare void @f.param.signext(i8 signext)
declare void @f.param.inreg(i8 inreg)
; CHECK: declare void @f.param.inreg(i8 inreg)
-declare void @f.param.byval({ i8, i8 }* byval)
+declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
declare void @f.param.inalloca(i8* inalloca)
; CHECK: declare void @f.param.inalloca(i8* inalloca)
; CHECK: declare void @f.param.signext(i8 signext)
declare void @f.param.inreg(i8 inreg)
; CHECK: declare void @f.param.inreg(i8 inreg)
-declare void @f.param.byval({ i8, i8 }* byval)
+declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
declare void @f.param.inalloca(i8* inalloca)
; CHECK: declare void @f.param.inalloca(i8* inalloca)
; CHECK: declare void @f.param.signext(i8 signext)
declare void @f.param.inreg(i8 inreg)
; CHECK: declare void @f.param.inreg(i8 inreg)
-declare void @f.param.byval({ i8, i8 }* byval)
+declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
declare void @f.param.inalloca(i8* inalloca)
; CHECK: declare void @f.param.inalloca(i8* inalloca)
; CHECK: declare void @f.param.signext(i8 signext)
declare void @f.param.inreg(i8 inreg)
; CHECK: declare void @f.param.inreg(i8 inreg)
-declare void @f.param.byval({ i8, i8 }* byval)
+declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
declare void @f.param.inalloca(i8* inalloca)
; CHECK: declare void @f.param.inalloca(i8* inalloca)
; CHECK: declare void @ParamAttr5(i8* inreg)
declare void @ParamAttr5(i8* inreg)
; CHECK: declare void @ParamAttr6(i8* byval(i8))
-declare void @ParamAttr6(i8* byval)
+declare void @ParamAttr6(i8* byval(i8))
; CHECK: declare void @ParamAttr7(i8* noalias)
declare void @ParamAttr7(i8* noalias)
; CHECK: declare void @ParamAttr8(i8* nocapture)
; CHECK: declare void @ParamAttr10{{[(i8* sret noalias nocapture) | (i8* noalias nocapture sret)]}}
declare void @ParamAttr10(i8* sret noalias nocapture)
;CHECK: declare void @ParamAttr11{{[(i8* byval(i8) noalias nocapture) | (i8* noalias nocapture byval(i8))]}}
-declare void @ParamAttr11(i8* byval noalias nocapture)
+declare void @ParamAttr11(i8* byval(i8) noalias nocapture)
;CHECK: declare void @ParamAttr12{{[(i8* inreg noalias nocapture) | (i8* noalias nocapture inreg)]}}
declare void @ParamAttr12(i8* inreg noalias nocapture)
}
; Shouldn't tail call when the caller has byval arguments.
-define void @test_byval(i8* byval %ptr) {
+define void @test_byval(i8* byval(i8) %ptr) {
; COMMON-LABEL: name: test_byval
; COMMON: bb.1 (%ir-block.0):
; COMMON: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; CHECK: stur {{.*}}, [x29, #{{.*}}] // 8-byte Folded Spill
; CHECK: ldur {{.*}}, [x29, #{{.*}}] // 8-byte Folded Reload
target triple = "aarch64--"
-declare void @extfunc([4096 x i64]* byval %p)
+declare void @extfunc([4096 x i64]* byval([4096 x i64]) %p)
define void @func([4096 x i64]* %z) {
%lvar = alloca [31 x i8]
%v = load volatile [31 x i8], [31 x i8]* %lvar
store volatile [31 x i8] %v, [31 x i8]* %lvar
- call void @extfunc([4096 x i64]* byval %z)
+ call void @extfunc([4096 x i64]* byval([4096 x i64]) %z)
ret void
}
; RUN: llc -mtriple=aarch64-linux-gnu %s -o - | FileCheck %s
-define i8 @byval_match(i8* byval(i8) align 1, i8* byval %ptr) {
+define i8 @byval_match(i8* byval(i8) align 1, i8* byval(i8) %ptr) {
; CHECK-LABEL: byval_match:
; CHECK: ldrb w0, [sp, #8]
%res = load i8, i8* %ptr
; CHECK: ldrb [[P0:w[0-9]+]], [x0]
; CHECK: strb [[P0]], [sp]
; CHECK: bl byval_match
- call i8 @byval_match(i8* byval(i8) align 1 %p0, i8* byval %p1)
+ call i8 @byval_match(i8* byval(i8) align 1 %p0, i8* byval(i8) %p1)
ret void
}
-define i8 @byval_large([3 x i64]* byval([3 x i64]) align 8, i8* byval %ptr) {
+define i8 @byval_large([3 x i64]* byval([3 x i64]) align 8, i8* byval(i8) %ptr) {
; CHECK-LABEL: byval_large:
; CHECK: ldrb w0, [sp, #24]
%res = load i8, i8* %ptr
; CHECK: str [[P0HI]], [sp, #16]
; CHECK: str [[P0LO]], [sp]
; CHECK: bl byval_large
- call i8 @byval_large([3 x i64]* byval([3 x i64]) align 8 %p0, i8* byval %p1)
+ call i8 @byval_large([3 x i64]* byval([3 x i64]) align 8 %p0, i8* byval(i8) %p1)
ret void
}
; byval pointers should be allocated to the stack and copied as if
; with memcpy.
-define void @take_struct(%myStruct* byval %structval) {
+define void @take_struct(%myStruct* byval(%myStruct) %structval) {
; CHECK-LABEL: take_struct:
%addr0 = getelementptr %myStruct, %myStruct* %structval, i64 0, i32 2
%addr1 = getelementptr %myStruct, %myStruct* %structval, i64 0, i32 0
}
; %structval should be at sp + 16
-define void @check_byval_align(i32* byval %ignore, %myStruct* byval align 16 %structval) {
+define void @check_byval_align(i32* byval(i32) %ignore, %myStruct* byval(%myStruct) align 16 %structval) {
; CHECK-LABEL: check_byval_align:
%addr0 = getelementptr %myStruct, %myStruct* %structval, i64 0, i32 2
; available, but it needs two). Also make sure that %stacked doesn't
; sneak into x7 behind.
define i32 @struct_on_stack(i8 %var0, i16 %var1, i32 %var2, i64 %var3, i128 %var45,
- i32* %var6, %myStruct* byval %struct, i32* byval %stacked,
+ i32* %var6, %myStruct* byval(%myStruct) %struct, i32* byval(i32) %stacked,
double %notstacked) {
; CHECK-LABEL: struct_on_stack:
%addr = getelementptr %myStruct, %myStruct* %struct, i64 0, i32 0
declare i32 @struct_on_stack(i8 %var0, i16 %var1, i32 %var2, i64 %var3, i128 %var45,
- i32* %var6, %myStruct* byval %struct, i32 %stacked,
+ i32* %var6, %myStruct* byval(%myStruct) %struct, i32 %stacked,
double %notstacked)
declare void @stacked_fpu(float %var0, double %var1, float %var2, float %var3,
float %var4, float %var5, float %var6, float %var7,
define void @check_stack_args() {
; CHECK-LABEL: check_stack_args:
call i32 @struct_on_stack(i8 0, i16 12, i32 42, i64 99, i128 1,
- i32* @var32, %myStruct* byval @varstruct,
+ i32* @var32, %myStruct* byval(%myStruct) @varstruct,
i32 999, double 1.0)
; Want to check that the final double is passed in registers and
; that varstruct is passed on the stack. Rather dependent on how a
%struct2 = type { i64, i64, i64 }
-declare void @consume_attributes(i32, i8* nest, i32, %struct2* byval)
+declare void @consume_attributes(i32, i8* nest, i32, %struct2* byval(%struct2))
-define void @test_attributes(%struct2* byval %s) gc "statepoint-example" {
+define void @test_attributes(%struct2* byval(%struct2) %s) gc "statepoint-example" {
; CHECK-LABEL: test_attributes:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: sub sp, sp, #32 // =32
entry:
; Check that arguments with attributes are lowered correctly.
; We call a function that has a nest argument and a byval argument.
- %statepoint_token = call token (i64, i32, void (i32, i8*, i32, %struct2*)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidi32p0i8i32p0s_struct2sf(i64 0, i32 0, void (i32, i8*, i32, %struct2*)* @consume_attributes, i32 4, i32 0, i32 42, i8* nest null, i32 17, %struct2* byval %s, i32 0, i32 0)
+ %statepoint_token = call token (i64, i32, void (i32, i8*, i32, %struct2*)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidi32p0i8i32p0s_struct2sf(i64 0, i32 0, void (i32, i8*, i32, %struct2*)* @consume_attributes, i32 4, i32 0, i32 42, i8* nest null, i32 17, %struct2* byval(%struct2) %s, i32 0, i32 0)
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -amdgpu-fixed-function-abi -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -stop-after=irtranslator -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
-declare hidden void @external_void_func_sret_struct_i8_i32_byval_struct_i8_i32({ i8, i32 } addrspace(5)* sret, { i8, i32 } addrspace(5)* byval) #0
+declare hidden void @external_void_func_sret_struct_i8_i32_byval_struct_i8_i32({ i8, i32 } addrspace(5)* sret({ i8, i32 }), { i8, i32 } addrspace(5)* byval({ i8, i32 })) #0
define amdgpu_kernel void @test_call_external_void_func_sret_struct_i8_i32_byval_struct_i8_i32(i32) #0 {
; GCN-LABEL: name: test_call_external_void_func_sret_struct_i8_i32_byval_struct_i8_i32
; Structs
declare hidden void @external_void_func_struct_i8_i32({ i8, i32 }) #0
-declare hidden void @external_void_func_byval_struct_i8_i32({ i8, i32 } addrspace(5)* byval) #0
-declare hidden void @external_void_func_sret_struct_i8_i32_byval_struct_i8_i32({ i8, i32 } addrspace(5)* sret, { i8, i32 } addrspace(5)* byval) #0
+declare hidden void @external_void_func_byval_struct_i8_i32({ i8, i32 } addrspace(5)* byval({ i8, i32 })) #0
+declare hidden void @external_void_func_sret_struct_i8_i32_byval_struct_i8_i32({ i8, i32 } addrspace(5)* sret({ i8, i32 }), { i8, i32 } addrspace(5)* byval({ i8, i32 })) #0
declare hidden void @external_void_func_v2i8(<2 x i8>) #0
declare hidden void @external_void_func_v3i8(<3 x i8>) #0
declare hidden void @external_void_func_v8i8(<8 x i8>) #0
declare hidden void @external_void_func_v16i8(<16 x i8>) #0
-declare hidden void @byval_align16_f64_arg(<32 x i32>, double addrspace(5)* byval align 16) #0
+declare hidden void @byval_align16_f64_arg(<32 x i32>, double addrspace(5)* byval(double) align 16) #0
declare hidden void @stack_passed_f64_arg(<32 x i32>, double) #0
declare hidden void @external_void_func_12xv3i32(<3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>,
<3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>) #0
ret void
}
-define void @void_func_byval_struct_i8_i32({ i8, i32 } addrspace(5)* byval %arg0) #0 {
+define void @void_func_byval_struct_i8_i32({ i8, i32 } addrspace(5)* byval({ i8, i32 }) %arg0) #0 {
; CHECK-LABEL: name: void_func_byval_struct_i8_i32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
ret void
}
-define void @void_func_byval_struct_i8_i32_x2({ i8, i32 } addrspace(5)* byval %arg0, { i8, i32 } addrspace(5)* byval %arg1, i32 %arg2) #0 {
+define void @void_func_byval_struct_i8_i32_x2({ i8, i32 } addrspace(5)* byval({ i8, i32 }) %arg0, { i8, i32 } addrspace(5)* byval({ i8, i32 }) %arg1, i32 %arg2) #0 {
; CHECK-LABEL: name: void_func_byval_struct_i8_i32_x2
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
ret void
}
-define void @void_func_byval_i32_byval_i64(i32 addrspace(5)* byval %arg0, i64 addrspace(5)* byval %arg1) #0 {
+define void @void_func_byval_i32_byval_i64(i32 addrspace(5)* byval(i32) %arg0, i64 addrspace(5)* byval(i64) %arg1) #0 {
; CHECK-LABEL: name: void_func_byval_i32_byval_i64
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; GCN: [[BB1]]
; GCN: s_or_b64 exec, exec
-define hidden void @void_func_byval_struct_use_outside_entry_block(%struct.ByValStruct addrspace(5)* byval noalias nocapture align 4 %arg0, %struct.ByValStruct addrspace(5)* byval noalias nocapture align 4 %arg1, i1 %cond) #1 {
+define hidden void @void_func_byval_struct_use_outside_entry_block(%struct.ByValStruct addrspace(5)* byval(%struct.ByValStruct) noalias nocapture align 4 %arg0, %struct.ByValStruct addrspace(5)* byval(%struct.ByValStruct) noalias nocapture align 4 %arg1, i1 %cond) #1 {
entry:
br i1 %cond, label %bb0, label %bb1
; Structs
declare hidden void @external_void_func_struct_i8_i32({ i8, i32 }) #0
-declare hidden void @external_void_func_byval_struct_i8_i32({ i8, i32 } addrspace(5)* byval) #0
-declare hidden void @external_void_func_sret_struct_i8_i32_byval_struct_i8_i32({ i8, i32 } addrspace(5)* sret, { i8, i32 } addrspace(5)* byval) #0
+declare hidden void @external_void_func_byval_struct_i8_i32({ i8, i32 } addrspace(5)* byval({ i8, i32 })) #0
+declare hidden void @external_void_func_sret_struct_i8_i32_byval_struct_i8_i32({ i8, i32 } addrspace(5)* sret({ i8, i32 }), { i8, i32 } addrspace(5)* byval({ i8, i32 })) #0
declare hidden void @external_void_func_v16i8(<16 x i8>) #0
%gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %val, i32 0, i32 1
store i8 3, i8 addrspace(5)* %gep0
store i32 8, i32 addrspace(5)* %gep1
- call void @external_void_func_byval_struct_i8_i32({ i8, i32 } addrspace(5)* %val)
+ call void @external_void_func_byval_struct_i8_i32({ i8, i32 } addrspace(5)* byval({ i8, i32 }) %val)
ret void
}
%in.gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %in.val, i32 0, i32 1
store i8 3, i8 addrspace(5)* %in.gep0
store i32 8, i32 addrspace(5)* %in.gep1
- call void @external_void_func_sret_struct_i8_i32_byval_struct_i8_i32({ i8, i32 } addrspace(5)* %out.val, { i8, i32 } addrspace(5)* %in.val)
+ call void @external_void_func_sret_struct_i8_i32_byval_struct_i8_i32({ i8, i32 } addrspace(5)* %out.val, { i8, i32 } addrspace(5)* byval({ i8, i32 }) %in.val)
%out.gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %out.val, i32 0, i32 0
%out.gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %out.val, i32 0, i32 1
%out.val0 = load i8, i8 addrspace(5)* %out.gep0
define void @tail_call_byval_align16(<32 x i32> %val, double %tmp) #0 {
entry:
%alloca = alloca double, align 8, addrspace(5)
- tail call void @byval_align16_f64_arg(<32 x i32> %val, double addrspace(5)* byval align 16 %alloca)
+ tail call void @byval_align16_f64_arg(<32 x i32> %val, double addrspace(5)* byval(double) align 16 %alloca)
ret void
}
ret void
}
-declare hidden void @byval_align16_f64_arg(<32 x i32>, double addrspace(5)* byval align 16) #0
+declare hidden void @byval_align16_f64_arg(<32 x i32>, double addrspace(5)* byval(double) align 16) #0
declare hidden void @stack_passed_f64_arg(<32 x i32>, double) #0
declare hidden void @external_void_func_12xv3i32(<3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>,
<3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>) #0
; GCN-NEXT: s_mov_b64 exec, [[COPY_EXEC1]]
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_setpc_b64
-define void @scratch_reg_needed_mubuf_offset([4096 x i8] addrspace(5)* byval align 4 %arg) #1 {
+define void @scratch_reg_needed_mubuf_offset([4096 x i8] addrspace(5)* byval([4096 x i8]) align 4 %arg) #1 {
%alloca = alloca i32, addrspace(5)
store volatile i32 0, i32 addrspace(5)* %alloca
; FLATSCR: s_add_u32 [[SOFF:s[0-9]+]], s33, 0x1004
; FLATSCR: v_mov_b32_e32 v0, 0
; FLATSCR: scratch_store_dword off, v0, [[SOFF]]
-define void @spill_fp_to_memory_scratch_reg_needed_mubuf_offset([4096 x i8] addrspace(5)* byval align 4 %arg) #3 {
+define void @spill_fp_to_memory_scratch_reg_needed_mubuf_offset([4096 x i8] addrspace(5)* byval([4096 x i8]) align 4 %arg) #3 {
%alloca = alloca i32, addrspace(5)
store volatile i32 0, i32 addrspace(5)* %alloca
i32 %arg0, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7,
i32 %arg8, i32 %arg9, i32 %arg10, i32 %arg11, i32 %arg12, i32 %arg13, i32 %arg14, i32 %arg15,
i32 %arg16, i32 %arg17, i32 %arg18, i32 %arg19, i32 %arg20, i32 %arg21, i32 %arg22, i32 %arg23,
- i32 %arg24, i32 %arg25, i32 %arg26, i32 %arg27, i32 %arg28, i32 %arg29, i32 %arg30, i32 %arg31, i32 addrspace(5)* byval %arg32) #1 {
+ i32 %arg24, i32 %arg25, i32 %arg26, i32 %arg27, i32 %arg28, i32 %arg29, i32 %arg30, i32 %arg31, i32 addrspace(5)* byval(i32) %arg32) #1 {
%val = call i32 @llvm.amdgcn.workitem.id.x()
store volatile i32 %val, i32 addrspace(1)* undef
store i8 %b, i8 addrspace(5)* %block.captured1, align 8
%tmp1 = bitcast <{ i32, i32, i8 addrspace(1)*, i8 }> addrspace(5)* %block to void () addrspace(5)*
%tmp4 = addrspacecast void () addrspace(5)* %tmp1 to i8*
- %tmp5 = call i32 @__enqueue_kernel_basic(%opencl.queue_t addrspace(1)* undef, i32 0, %struct.ndrange_t addrspace(5)* byval nonnull %tmp,
+ %tmp5 = call i32 @__enqueue_kernel_basic(%opencl.queue_t addrspace(1)* undef, i32 0, %struct.ndrange_t addrspace(5)* byval(%struct.ndrange_t) nonnull %tmp,
i8* bitcast (void (<{ i32, i32, i8 addrspace(1)*, i8 }>)* @__test_block_invoke_kernel to i8*), i8* nonnull %tmp4) #2
- %tmp10 = call i32 @__enqueue_kernel_basic(%opencl.queue_t addrspace(1)* undef, i32 0, %struct.ndrange_t addrspace(5)* byval nonnull %tmp,
+ %tmp10 = call i32 @__enqueue_kernel_basic(%opencl.queue_t addrspace(1)* undef, i32 0, %struct.ndrange_t addrspace(5)* byval(%struct.ndrange_t) nonnull %tmp,
i8* bitcast (void (<{ i32, i32, i8 addrspace(1)*, i8 }>)* @__test_block_invoke_kernel to i8*), i8* nonnull %tmp4) #2
- %tmp11 = call i32 @__enqueue_kernel_basic(%opencl.queue_t addrspace(1)* undef, i32 0, %struct.ndrange_t addrspace(5)* byval nonnull %tmp,
+ %tmp11 = call i32 @__enqueue_kernel_basic(%opencl.queue_t addrspace(1)* undef, i32 0, %struct.ndrange_t addrspace(5)* byval(%struct.ndrange_t) nonnull %tmp,
i8* bitcast (void (<{ i32, i32, i8 addrspace(1)*, i8 }>)* @0 to i8*), i8* nonnull %tmp4) #2
- %tmp12 = call i32 @__enqueue_kernel_basic(%opencl.queue_t addrspace(1)* undef, i32 0, %struct.ndrange_t addrspace(5)* byval nonnull %tmp,
+ %tmp12 = call i32 @__enqueue_kernel_basic(%opencl.queue_t addrspace(1)* undef, i32 0, %struct.ndrange_t addrspace(5)* byval(%struct.ndrange_t) nonnull %tmp,
i8* bitcast (void (<{ i32, i32, i8 addrspace(1)*, i8 }>)* @1 to i8*), i8* nonnull %tmp4) #2
%block.size4 = getelementptr inbounds <{ i32, i32, i8 addrspace(1)*, i64 addrspace(1)*, i64, i8 }>, <{ i32, i32, i8 addrspace(1)*, i64 addrspace(1)*, i64, i8 }> addrspace(5)* %block2, i32 0, i32 0
store i32 41, i32 addrspace(5)* %block.size4, align 8
store i64 %d, i64 addrspace(5)* %block.captured10, align 8
%tmp6 = bitcast <{ i32, i32, i8 addrspace(1)*, i64 addrspace(1)*, i64, i8 }> addrspace(5)* %block2 to void () addrspace(5)*
%tmp8 = addrspacecast void () addrspace(5)* %tmp6 to i8*
- %tmp9 = call i32 @__enqueue_kernel_basic(%opencl.queue_t addrspace(1)* undef, i32 0, %struct.ndrange_t addrspace(5)* byval nonnull %tmp3,
+ %tmp9 = call i32 @__enqueue_kernel_basic(%opencl.queue_t addrspace(1)* undef, i32 0, %struct.ndrange_t addrspace(5)* byval(%struct.ndrange_t) nonnull %tmp3,
i8* bitcast (void (<{ i32, i32, i8 addrspace(1)*, i64 addrspace(1)*, i64, i8 }>)* @__test_block_invoke_2_kernel to i8*), i8* nonnull %tmp8) #2
ret void
}
; GCN-NOT: v_mov
; GCN: ds_write_b32 v0, v0
-define void @void_func_byval_struct_i8_i32_ptr({ i8, i32 } addrspace(5)* byval %arg0) #0 {
+define void @void_func_byval_struct_i8_i32_ptr({ i8, i32 } addrspace(5)* byval({ i8, i32 }) %arg0) #0 {
%gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %arg0, i32 0, i32 0
%gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %arg0, i32 0, i32 1
%load1 = load i32, i32 addrspace(5)* %gep1
; MUBUF-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:4
; GFX9-FLATSCR-NEXT: scratch_load_ubyte v0, off, s32
; GFX9-FLATSCR-NEXT: scratch_load_dword v1, off, s32 offset:4
-define void @void_func_byval_struct_i8_i32_ptr_value({ i8, i32 } addrspace(5)* byval %arg0) #0 {
+define void @void_func_byval_struct_i8_i32_ptr_value({ i8, i32 } addrspace(5)* byval({ i8, i32 }) %arg0) #0 {
%gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %arg0, i32 0, i32 0
%gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %arg0, i32 0, i32 1
%load0 = load i8, i8 addrspace(5)* %gep0
; GFX9-FLATSCR: scratch_load_dword v{{[0-9]+}}, [[SP]], off offset:4{{$}}
; GCN: ds_write_b32 v{{[0-9]+}}, [[GEP]]
-define void @void_func_byval_struct_i8_i32_ptr_nonentry_block({ i8, i32 } addrspace(5)* byval %arg0, i32 %arg2) #0 {
+define void @void_func_byval_struct_i8_i32_ptr_nonentry_block({ i8, i32 } addrspace(5)* byval({ i8, i32 }) %arg0, i32 %arg2) #0 {
%cmp = icmp eq i32 %arg2, 0
br i1 %cmp, label %bb, label %ret
; GCN-DAG: buffer_load_dword v[[ELT1:[0-9]+]], off, s[0:3], s32 offset:4{{$}}
; GCN-DAG: buffer_store_dword v[[ELT1]]
; GCN-DAG: buffer_store_byte v[[ELT0]]
-define void @void_func_byval_struct_i8_i32({ i8, i32 } addrspace(5)* byval %arg0) #0 {
+define void @void_func_byval_struct_i8_i32({ i8, i32 } addrspace(5)* byval({ i8, i32 }) %arg0) #0 {
%arg0.load = load { i8, i32 }, { i8, i32 } addrspace(5)* %arg0
store { i8, i32 } %arg0.load, { i8, i32 } addrspace(1)* undef
ret void
; GCN: ds_write_b32 v0, v0
; GCN: s_setpc_b64
-define void @void_func_byval_struct_i8_i32_x2({ i8, i32 } addrspace(5)* byval %arg0, { i8, i32 } addrspace(5)* byval %arg1, i32 %arg2) #0 {
+define void @void_func_byval_struct_i8_i32_x2({ i8, i32 } addrspace(5)* byval({ i8, i32 }) %arg0, { i8, i32 } addrspace(5)* byval({ i8, i32 }) %arg1, i32 %arg2) #0 {
%arg0.load = load volatile { i8, i32 }, { i8, i32 } addrspace(5)* %arg0
%arg1.load = load volatile { i8, i32 }, { i8, i32 } addrspace(5)* %arg1
store volatile { i8, i32 } %arg0.load, { i8, i32 } addrspace(1)* undef
; GCN-DAG: buffer_load_dword v[[ARG1_LOAD1:[0-9]+]], off, s[0:3], s32 offset:12{{$}}
; GCN-DAG: buffer_store_dword v[[ARG0_LOAD]], off
; GCN-DAG: buffer_store_dwordx2 v{{\[}}[[ARG1_LOAD0]]:[[ARG1_LOAD1]]{{\]}}, off
-define void @void_func_byval_i32_byval_i64(i32 addrspace(5)* byval %arg0, i64 addrspace(5)* byval %arg1) #0 {
+define void @void_func_byval_i32_byval_i64(i32 addrspace(5)* byval(i32) %arg0, i64 addrspace(5)* byval(i64) %arg1) #0 {
%arg0.load = load i32, i32 addrspace(5)* %arg0
%arg1.load = load i64, i64 addrspace(5)* %arg1
store i32 %arg0.load, i32 addrspace(1)* undef
; Structs
declare hidden amdgpu_gfx void @external_void_func_struct_i8_i32({ i8, i32 }) #0
-declare hidden amdgpu_gfx void @external_void_func_byval_struct_i8_i32({ i8, i32 } addrspace(5)* byval) #0
-declare hidden amdgpu_gfx void @external_void_func_sret_struct_i8_i32_byval_struct_i8_i32({ i8, i32 } addrspace(5)* sret, { i8, i32 } addrspace(5)* byval) #0
+declare hidden amdgpu_gfx void @external_void_func_byval_struct_i8_i32({ i8, i32 } addrspace(5)* byval({ i8, i32 })) #0
+declare hidden amdgpu_gfx void @external_void_func_sret_struct_i8_i32_byval_struct_i8_i32({ i8, i32 } addrspace(5)* sret({ i8, i32 }), { i8, i32 } addrspace(5)* byval({ i8, i32 })) #0
declare hidden amdgpu_gfx void @external_void_func_v16i8(<16 x i8>) #0
; GFX10-NEXT: s_setpc_b64 s[4:5]
entry:
%alloca = alloca double, align 8, addrspace(5)
- tail call amdgpu_gfx void @byval_align16_f64_arg(<32 x i32> %val, double addrspace(5)* byval align 16 %alloca)
+ tail call amdgpu_gfx void @byval_align16_f64_arg(<32 x i32> %val, double addrspace(5)* byval(double) align 16 %alloca)
ret void
}
ret void
}
-declare hidden amdgpu_gfx void @byval_align16_f64_arg(<32 x i32>, double addrspace(5)* byval align 16) #0
+declare hidden amdgpu_gfx void @byval_align16_f64_arg(<32 x i32>, double addrspace(5)* byval(double) align 16) #0
declare hidden amdgpu_gfx void @stack_passed_f64_arg(<32 x i32>, double) #0
declare hidden amdgpu_gfx void @external_void_func_12xv3i32(<3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>,
<3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>) #0
; GFX900-NEXT: s_setpc_b64
; NO-D16-HI: buffer_load_ushort v{{[0-9]+}}, off, s[0:3], s32 offset:4094{{$}}
-define void @load_private_hi_v2i16_reglo_vreg(i16 addrspace(5)* byval %in, i16 %reg) #0 {
+define void @load_private_hi_v2i16_reglo_vreg(i16 addrspace(5)* byval(i16) %in, i16 %reg) #0 {
entry:
%gep = getelementptr inbounds i16, i16 addrspace(5)* %in, i64 2047
%load = load i16, i16 addrspace(5)* %gep
; GFX900-NEXT: s_setpc_b64
; NO-D16-HI: buffer_load_ushort v{{[0-9]+}}, off, s[0:3], s32 offset:4094{{$}}
-define void @load_private_hi_v2f16_reglo_vreg(half addrspace(5)* byval %in, half %reg) #0 {
+define void @load_private_hi_v2f16_reglo_vreg(half addrspace(5)* byval(half) %in, half %reg) #0 {
entry:
%gep = getelementptr inbounds half, half addrspace(5)* %in, i64 2047
%load = load half, half addrspace(5)* %gep
; GFX900-NEXT: s_setpc_b64
; NO-D16-HI: buffer_load_ushort v{{[0-9]+}}, off, s[0:3], 0 offset:4094{{$}}
-define void @load_private_hi_v2i16_reglo_vreg_nooff(i16 addrspace(5)* byval %in, i16 %reg) #0 {
+define void @load_private_hi_v2i16_reglo_vreg_nooff(i16 addrspace(5)* byval(i16) %in, i16 %reg) #0 {
entry:
%load = load volatile i16, i16 addrspace(5)* inttoptr (i32 4094 to i16 addrspace(5)*)
%build0 = insertelement <2 x i16> undef, i16 %reg, i32 0
; GFX900-NEXT: s_setpc_b64
; NO-D16-HI: buffer_load_ubyte v{{[0-9]+}}, off, s[0:3], s32 offset:4095{{$}}
-define void @load_private_hi_v2i16_reglo_vreg_zexti8(i8 addrspace(5)* byval %in, i16 %reg) #0 {
+define void @load_private_hi_v2i16_reglo_vreg_zexti8(i8 addrspace(5)* byval(i8) %in, i16 %reg) #0 {
entry:
%gep = getelementptr inbounds i8, i8 addrspace(5)* %in, i64 4095
%load = load i8, i8 addrspace(5)* %gep
; GFX900-NEXT: s_setpc_b64
; NO-D16-HI: buffer_load_ubyte v{{[0-9]+}}, off, s[0:3], s32 offset:4095{{$}}
-define void @load_private_hi_v2f16_reglo_vreg_zexti8(i8 addrspace(5)* byval %in, half %reg) #0 {
+define void @load_private_hi_v2f16_reglo_vreg_zexti8(i8 addrspace(5)* byval(i8) %in, half %reg) #0 {
entry:
%gep = getelementptr inbounds i8, i8 addrspace(5)* %in, i64 4095
%load = load i8, i8 addrspace(5)* %gep
; GFX900-NEXT: s_setpc_b64
; NO-D16-HI: buffer_load_sbyte v{{[0-9]+}}, off, s[0:3], s32 offset:4095{{$}}
-define void @load_private_hi_v2f16_reglo_vreg_sexti8(i8 addrspace(5)* byval %in, half %reg) #0 {
+define void @load_private_hi_v2f16_reglo_vreg_sexti8(i8 addrspace(5)* byval(i8) %in, half %reg) #0 {
entry:
%gep = getelementptr inbounds i8, i8 addrspace(5)* %in, i64 4095
%load = load i8, i8 addrspace(5)* %gep
; GFX900-NEXT: s_setpc_b64
; NO-D16-HI: buffer_load_sbyte v{{[0-9]+}}, off, s[0:3], s32 offset:4095{{$}}
-define void @load_private_hi_v2i16_reglo_vreg_sexti8(i8 addrspace(5)* byval %in, i16 %reg) #0 {
+define void @load_private_hi_v2i16_reglo_vreg_sexti8(i8 addrspace(5)* byval(i8) %in, i16 %reg) #0 {
entry:
%gep = getelementptr inbounds i8, i8 addrspace(5)* %in, i64 4095
%load = load i8, i8 addrspace(5)* %gep
; GFX900-FLATSCR-NEXT: scratch_load_short_d16_hi v0, off, s32 offset:2
; GFX900-NEXT: s_waitcnt
; GFX900-NEXT: s_setpc_b64
-define <2 x i16> @load_private_v2i16_split(i16 addrspace(5)* byval %in) #0 {
+define <2 x i16> @load_private_v2i16_split(i16 addrspace(5)* byval(i16) %in) #0 {
entry:
%gep = getelementptr inbounds i16, i16 addrspace(5)* %in, i32 1
%load0 = load volatile i16, i16 addrspace(5)* %in
ret void
}
-define void @load_private_lo_v2i16_reglo_vreg(i16 addrspace(5)* byval %in, i32 %reg) #0 {
+define void @load_private_lo_v2i16_reglo_vreg(i16 addrspace(5)* byval(i16) %in, i32 %reg) #0 {
; GFX900-MUBUF-LABEL: load_private_lo_v2i16_reglo_vreg:
; GFX900-MUBUF: ; %bb.0: ; %entry
; GFX900-MUBUF-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
ret void
}
-define void @load_private_lo_v2i16_reghi_vreg(i16 addrspace(5)* byval %in, i16 %reg) #0 {
+define void @load_private_lo_v2i16_reghi_vreg(i16 addrspace(5)* byval(i16) %in, i16 %reg) #0 {
; GFX900-MUBUF-LABEL: load_private_lo_v2i16_reghi_vreg:
; GFX900-MUBUF: ; %bb.0: ; %entry
; GFX900-MUBUF-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
ret void
}
-define void @load_private_lo_v2f16_reglo_vreg(half addrspace(5)* byval %in, i32 %reg) #0 {
+define void @load_private_lo_v2f16_reglo_vreg(half addrspace(5)* byval(half) %in, i32 %reg) #0 {
; GFX900-MUBUF-LABEL: load_private_lo_v2f16_reglo_vreg:
; GFX900-MUBUF: ; %bb.0: ; %entry
; GFX900-MUBUF-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
ret void
}
-define void @load_private_lo_v2i16_reglo_vreg_zexti8(i8 addrspace(5)* byval %in, i32 %reg) #0 {
+define void @load_private_lo_v2i16_reglo_vreg_zexti8(i8 addrspace(5)* byval(i8) %in, i32 %reg) #0 {
; GFX900-MUBUF-LABEL: load_private_lo_v2i16_reglo_vreg_zexti8:
; GFX900-MUBUF: ; %bb.0: ; %entry
; GFX900-MUBUF-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
ret void
}
-define void @load_private_lo_v2i16_reglo_vreg_sexti8(i8 addrspace(5)* byval %in, i32 %reg) #0 {
+define void @load_private_lo_v2i16_reglo_vreg_sexti8(i8 addrspace(5)* byval(i8) %in, i32 %reg) #0 {
; GFX900-MUBUF-LABEL: load_private_lo_v2i16_reglo_vreg_sexti8:
; GFX900-MUBUF: ; %bb.0: ; %entry
; GFX900-MUBUF-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-LABEL: define void @skip_byval_arg(
; CHECK-NEXT: store i32 0, i32* %val
; CHECK-NEXT: ret void
-define void @skip_byval_arg(i32* byval %val) #0 {
+define void @skip_byval_arg(i32* byval(i32) %val) #0 {
store i32 0, i32* %val
ret void
}
; CHECK-LABEL: define void @skip_optnone(
; CHECK-NEXT: store i32 0, i32* %val
; CHECK-NEXT: ret void
-define void @skip_optnone(i32* byval %val) #1 {
+define void @skip_optnone(i32* byval(i32) %val) #1 {
store i32 0, i32* %val
ret void
}
; CHECK-LABEL: define void @skip_volatile(
; CHECK-NEXT: store volatile i32 0, i32* %val
; CHECK-NEXT: ret void
-define void @skip_volatile(i32* byval %val) #0 {
+define void @skip_volatile(i32* byval(i32) %val) #0 {
store volatile i32 0, i32* %val
ret void
}
; CHECK-LABEL: define void @skip_atomic(
; CHECK-NEXT: store atomic i32 0, i32* %val
; CHECK-NEXT: ret void
-define void @skip_atomic(i32* byval %val) #0 {
+define void @skip_atomic(i32* byval(i32) %val) #0 {
store atomic i32 0, i32* %val seq_cst, align 4
ret void
}
; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
-define hidden fastcc i32 @i32_fastcc_i32_byval_i32(i32 %arg0, i32 addrspace(5)* byval align 4 %arg1) #1 {
+define hidden fastcc i32 @i32_fastcc_i32_byval_i32(i32 %arg0, i32 addrspace(5)* byval(i32) align 4 %arg1) #1 {
%arg1.load = load i32, i32 addrspace(5)* %arg1, align 4
%add0 = add i32 %arg0, %arg1.load
ret i32 %add0
; GCN: s_swappc_b64
; GCN-NOT: v_readlane_b32 s32
; GCN: s_setpc_b64
-define fastcc i32 @sibling_call_i32_fastcc_i32_byval_i32_byval_parent(i32 %a, i32 addrspace(5)* byval %b.byval, i32 %c) #1 {
+define fastcc i32 @sibling_call_i32_fastcc_i32_byval_i32_byval_parent(i32 %a, i32 addrspace(5)* byval(i32) %b.byval, i32 %c) #1 {
entry:
- %ret = tail call fastcc i32 @i32_fastcc_i32_byval_i32(i32 %a, i32 addrspace(5)* %b.byval)
+ %ret = tail call fastcc i32 @i32_fastcc_i32_byval_i32(i32 %a, i32 addrspace(5)* byval(i32) %b.byval)
ret i32 %ret
}
; GCN-NEXT: s_setpc_b64
define fastcc i32 @sibling_call_i32_fastcc_i32_byval_i32(i32 %a, [32 x i32] %large) #1 {
entry:
- %ret = tail call fastcc i32 @i32_fastcc_i32_byval_i32(i32 %a, i32 addrspace(5)* inttoptr (i32 16 to i32 addrspace(5)*))
+ %ret = tail call fastcc i32 @i32_fastcc_i32_byval_i32(i32 %a, i32 addrspace(5)* byval(i32) inttoptr (i32 16 to i32 addrspace(5)*))
ret i32 %ret
}
ret void
}
-define void @spill_bp_to_memory_scratch_reg_needed_mubuf_offset(<32 x i32> %a, i32 %b, [4096 x i8] addrspace(5)* byval align 4 %arg) #5 {
+define void @spill_bp_to_memory_scratch_reg_needed_mubuf_offset(<32 x i32> %a, i32 %b, [4096 x i8] addrspace(5)* byval([4096 x i8]) align 4 %arg) #5 {
; If the size of the offset exceeds the MUBUF offset field we need another
; scratch VGPR to hold the offset.
; GCN-NEXT: s_waitcnt
; GCN-NEXT: s_setpc_b64
-define void @store_private_hi_v2i16_max_offset(i16 addrspace(5)* byval %out, i32 %arg) #0 {
+define void @store_private_hi_v2i16_max_offset(i16 addrspace(5)* byval(i16) %out, i32 %arg) #0 {
entry:
%value = bitcast i32 %arg to <2 x i16>
%hi = extractelement <2 x i16> %value, i32 1
define fastcc void @t() {
entry:
- %tmp28 = call fastcc i1 null(i32* null, %"byte[]" undef, %"byte[]" undef, %tango.time.Time.Time* byval null) ; <i1> [#uses=0]
+ %tmp28 = call fastcc i1 null(i32* null, %"byte[]" undef, %"byte[]" undef, %tango.time.Time.Time* byval(%tango.time.Time.Time) null) ; <i1> [#uses=0]
ret void
}
define i32 @"\01_fnmatch"(i8* %pattern, i8* %string, i32 %flags) nounwind optsize {
entry:
- %call4 = tail call i32 @fnmatch1(i8* %pattern, i8* %string, i8* %string, i32 %flags, %union.__mbstate_t* byval @"\01_fnmatch.initial", %union.__mbstate_t* byval @"\01_fnmatch.initial", %struct._xlocale* undef, i32 64) optsize
+ %call4 = tail call i32 @fnmatch1(i8* %pattern, i8* %string, i8* %string, i32 %flags, %union.__mbstate_t* byval(%union.__mbstate_t) @"\01_fnmatch.initial", %union.__mbstate_t* byval(%union.__mbstate_t) @"\01_fnmatch.initial", %struct._xlocale* undef, i32 64) optsize
ret i32 %call4
}
-declare i32 @fnmatch1(i8*, i8*, i8*, i32, %union.__mbstate_t* byval, %union.__mbstate_t* byval, %struct._xlocale*, i32) nounwind optsize
+declare i32 @fnmatch1(i8*, i8*, i8*, i32, %union.__mbstate_t* byval(%union.__mbstate_t), %union.__mbstate_t* byval(%union.__mbstate_t), %struct._xlocale*, i32) nounwind optsize
; CHECK: add sp, #12
; CHECK: b.w _puts
-define void @f(i8* %s, %struct.A* nocapture byval %a) nounwind optsize {
+define void @f(i8* %s, %struct.A* nocapture byval(%struct.A) %a) nounwind optsize {
entry:
%puts = tail call i32 @puts(i8* %s)
ret void
; CHECK: movw r0, #555
define i32 @main() {
entry:
- call void (i32, ...) @test_byval_8_bytes_alignment(i32 555, %struct_t* byval @static_val)
+ call void (i32, ...) @test_byval_8_bytes_alignment(i32 555, %struct_t* byval(%struct_t) @static_val)
ret i32 0
}
; CHECK-DAG: str r3, [sp, #12]
; CHECK-DAG: str r2, [sp, #8]
; CHECK-NOT: str r1
-define void @test_byval_8_bytes_alignment_fixed_arg(i32 %n1, %struct_t* byval %val) nounwind {
+define void @test_byval_8_bytes_alignment_fixed_arg(i32 %n1, %struct_t* byval(%struct_t) %val) nounwind {
entry:
%a = getelementptr inbounds %struct_t, %struct_t* %val, i32 0, i32 0
%0 = load double, double* %a
; CHECK: movw r0, #555
define i32 @main_fixed_arg() {
entry:
- call void (i32, %struct_t*) @test_byval_8_bytes_alignment_fixed_arg(i32 555, %struct_t* byval @static_val)
+ call void (i32, %struct_t*) @test_byval_8_bytes_alignment_fixed_arg(i32 555, %struct_t* byval(%struct_t) @static_val)
ret i32 0
}
; CHECK-DAG: str r3, [sp, #12]
; CHECK-DAG: str r2, [sp, #8]
; CHECK: vldr d16, [sp, #8]
-define void @test_byval_usage_scheduling(i32 %n1, i32 %n2, %struct_t* byval %val) nounwind {
+define void @test_byval_usage_scheduling(i32 %n1, i32 %n2, %struct_t* byval(%struct_t) %val) nounwind {
entry:
%a = getelementptr inbounds %struct_t, %struct_t* %val, i32 0, i32 0
%0 = load double, double* %a
%my_struct_t = type { i8, i8, i8, i8, i8 }
@main.val = private unnamed_addr constant %my_struct_t { i8 1, i8 2, i8 3, i8 4, i8 5 }
-declare void @f(i32 %n1, i32 %n2, i32 %n3, %my_struct_t* byval %val);
+declare void @f(i32 %n1, i32 %n2, i32 %n3, %my_struct_t* byval(%my_struct_t) %val);
; CHECK-LABEL: main:
define i32 @main() nounwind {
entry:
; CHECK: ldrb {{(r[0-9]+)}}, {{(\[r[0-9]+\])}}, #1
- call void @f(i32 555, i32 555, i32 555, %my_struct_t* byval @main.val)
+ call void @f(i32 555, i32 555, i32 555, %my_struct_t* byval(%my_struct_t) @main.val)
ret i32 0
}
; RUN: llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s
%struct.s = type { [4 x i32] }
-@v = constant %struct.s zeroinitializer;
+@v = constant %struct.s zeroinitializer;
declare void @f(%struct.s* %p);
; CHECK-LABEL: t:
-define void @t(i32 %a, %struct.s* byval %s) nounwind {
+define void @t(i32 %a, %struct.s* byval(%struct.s) %s) nounwind {
entry:
; Here we need to only check proper start address of restored %s argument.
define void @caller() {
; CHECK: ldm r{{[0-9]+}}, {r1, r2, r3}
- call void @t(i32 0, %struct.s* @v);
+ call void @t(i32 0, %struct.s* byval(%struct.s) @v);
ret void
}
declare void @fooUseParam(%artz* )
-define void @foo(%artz* byval %s) {
+define void @foo(%artz* byval(%artz) %s) {
call void @fooUseParam(%artz* %s)
ret void
}
-define void @foo2(%artz* byval %s, i32 %p, %artz* byval %s2) {
+define void @foo2(%artz* byval(%artz) %s, i32 %p, %artz* byval(%artz) %s2) {
call void @fooUseParam(%artz* %s)
call void @fooUseParam(%artz* %s2)
ret void
define void @doFoo() {
- call void @foo(%artz* byval @static_val)
+ call void @foo(%artz* byval(%artz) @static_val)
ret void
}
define void @doFoo2() {
- call void @foo2(%artz* byval @static_val, i32 0, %artz* byval @static_val)
+ call void @foo2(%artz* byval(%artz) @static_val, i32 0, %artz* byval(%artz) @static_val)
ret void
}
double %p7, ; --> D7
double %p8, ; --> Stack
i32 %p9, ; --> R0
- %struct_t* byval %p10) ; --> Stack+8
+ %struct_t* byval(%struct_t) %p10) ; --> Stack+8
{
entry:
;CHECK: push {r7, lr}
double 23.7, ; --> D7
double 23.8, ; --> Stack
i32 43, ; --> R0, not Stack+8
- %struct_t* byval @static_val) ; --> Stack+8, not R1
+ %struct_t* byval(%struct_t) @static_val) ; --> Stack+8, not R1
ret void
}
double %vfp6, ; --> D6, NSAA=SP
double %vfp7, ; --> D7, NSAA=SP
double %vfp8, ; --> SP, NSAA=SP+8 (!)
- i32 %p0, ; --> R0, NSAA=SP+8
- %st_t* byval %p1, ; --> R1, R2, NSAA=SP+8
- i32 %p2, ; --> R3, NSAA=SP+8
+ i32 %p0, ; --> R0, NSAA=SP+8
+ %st_t* byval(%st_t) %p1, ; --> R1, R2, NSAA=SP+8
+ i32 %p2, ; --> R3, NSAA=SP+8
i32 %p3) #0 { ; --> SP+4, NSAA=SP+12
entry:
;CHECK: sub sp, #12
double 23.6,
double 23.7,
double 23.8,
- i32 0, %st_t* byval @static_val, i32 1, i32 2)
+ i32 0, %st_t* byval(%st_t) @static_val, i32 1, i32 2)
ret void
}
double %vfp6, ; --> D6, NSAA=SP
double %vfp7, ; --> D7, NSAA=SP
double %vfp8, ; --> SP, NSAA=SP+8 (!)
- i32 %p0, ; --> R0, NSAA=SP+8
- %st_t* byval %p1, ; --> SP+8, 4 words NSAA=SP+24
- i32 %p2) #0 { ; --> SP+24, NSAA=SP+24
-
+ i32 %p0, ; --> R0, NSAA=SP+8
+ %st_t* byval(%st_t) %p1, ; --> SP+8, 4 words NSAA=SP+24
+ i32 %p2) #0 { ; --> SP+24, NSAA=SP+24
+
entry:
;CHECK: push {r7, lr}
;CHECK: ldr r0, [sp, #32]
double 23.6,
double 23.7,
double 23.8,
- i32 0, %st_t* byval @static_val, i32 1)
+ i32 0, %st_t* byval(%st_t) @static_val, i32 1)
ret void
}
define void @check227(
i32 %b,
- %struct.S227* byval nocapture %arg0,
+ %struct.S227* byval(%struct.S227) nocapture %arg0,
%struct.S227* %arg1) {
; b --> R0
; arg0 --> [R1, R2, R3, SP+0 .. SP+188)
%struct4bytes = type { i32 }
%struct20bytes = type { i32, i32, i32, i32, i32 }
-define void @foo(%struct4bytes* byval %p0, ; --> R0
- %struct20bytes* byval %p1 ; --> R1,R2,R3, [SP+0 .. SP+8)
+define void @foo(%struct4bytes* byval(%struct4bytes) %p0, ; --> R0
+ %struct20bytes* byval(%struct20bytes) %p1 ; --> R1,R2,R3, [SP+0 .. SP+8)
) {
;CHECK: sub sp, sp, #16
;CHECK: push {r11, lr}
;CHECK: sub sp, sp, #8
;CHECK: sub sp, sp, #2048
;CHECK: bl callme0
- call void @callme0(%big_struct0* byval %p0)
+ call void @callme0(%big_struct0* byval(%big_struct0) %p0)
;CHECK: add sp, sp, #8
;CHECK: add sp, sp, #2048
;CHECK: sub sp, sp, #2048
;CHECK: bl callme1
- call void @callme1(%big_struct1* byval %p1)
+ call void @callme1(%big_struct1* byval(%big_struct1) %p1)
;CHECK: add sp, sp, #2048
ret void
}
-declare void @callme0(%big_struct0* byval)
-declare void @callme1(%big_struct1* byval)
+declare void @callme0(%big_struct0* byval(%big_struct0))
+declare void @callme1(%big_struct1* byval(%big_struct1))
; a -> r0
; b -> r1..r3
; c -> sp+0..sp+7
-define void @foo1(i32 %a, %struct12bytes* byval %b, i64 %c) {
+define void @foo1(i32 %a, %struct12bytes* byval(%struct12bytes) %b, i64 %c) {
; CHECK-LABEL: foo1
; CHECK: sub sp, sp, #12
; CHECK: push {r11, lr}
; a -> r0
; b -> r2..r3
-define void @foo2(i32 %a, %struct8bytes8align* byval %b) {
+define void @foo2(i32 %a, %struct8bytes8align* byval(%struct8bytes8align) %b) {
; CHECK-LABEL: foo2
; CHECK: sub sp, sp, #8
; CHECK: push {r11, lr}
; a -> r0..r1
; b -> r2
-define void @foo3(%struct8bytes8align* byval %a, %struct4bytes* byval %b) {
+define void @foo3(%struct8bytes8align* byval(%struct8bytes8align) %a, %struct4bytes* byval(%struct4bytes) %b) {
; CHECK-LABEL: foo3
; CHECK: sub sp, sp, #16
; CHECK: push {r11, lr}
; a -> r0
; b -> r2..r3
-define void @foo4(%struct4bytes* byval %a, %struct8bytes8align* byval %b) {
+define void @foo4(%struct4bytes* byval(%struct4bytes) %a, %struct8bytes8align* byval(%struct8bytes8align) %b) {
; CHECK-LABEL: foo4
; CHECK: sub sp, sp, #16
; CHECK: push {r11, lr}
; a -> r0..r1
; b -> r2
; c -> r3
-define void @foo5(%struct8bytes8align* byval %a, %struct4bytes* byval %b, %struct4bytes* byval %c) {
+define void @foo5(%struct8bytes8align* byval(%struct8bytes8align) %a, %struct4bytes* byval(%struct4bytes) %b, %struct4bytes* byval(%struct4bytes) %c) {
; CHECK-LABEL: foo5
; CHECK: sub sp, sp, #16
; CHECK: push {r11, lr}
; a..c -> r0..r2
; d -> sp+0..sp+7
-define void @foo6(i32 %a, i32 %b, i32 %c, %struct8bytes8align* byval %d) {
+define void @foo6(i32 %a, i32 %b, i32 %c, %struct8bytes8align* byval(%struct8bytes8align) %d) {
; CHECK-LABEL: foo6
; CHECK: push {r11, lr}
; CHECK: add r0, sp, #8
%byval.class = type { i32 }
-define void @test_byval_arg(%byval.class* byval %x) {
+define void @test_byval_arg(%byval.class* byval(%byval.class) %x) {
; CHECK: remark: {{.*}} unable to lower arguments: void (%byval.class*)*
; CHECK-LABEL: warning: Instruction selection used fallback path for test_byval
ret void
define void @test_byval_param(%byval.class* %x) {
; CHECK: remark: {{.*}} unable to translate instruction: call
; CHECK-LABEL: warning: Instruction selection used fallback path for test_byval_param
- call void @test_byval_arg(%byval.class* byval %x)
+ call void @test_byval_arg(%byval.class* byval(%byval.class) %x)
ret void
}
@.str.3 = private unnamed_addr constant [2 x i8] c"d\00", align 1
declare i32* @_Z4bar3iiPKcS0_i(i32, i32, i8*, i8*, i32)
-declare void @_Z4bar1i8struct_2(i32, %struct.struct_2* byval align 4)
+declare void @_Z4bar1i8struct_2(i32, %struct.struct_2* byval(%struct.struct_2) align 4)
declare i32 @_Z4bar2PiPKc(i32*, i8*)
define void @_Z3fooiiiii(i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5) #0 {
br i1 %cmp, label %for.body, label %for.end
for.end:
- call void @_Z4bar1i8struct_2(i32 %p4, %struct.struct_2* byval nonnull align 4 %params) #4
+ call void @_Z4bar1i8struct_2(i32 %p4, %struct.struct_2* byval(%struct.struct_2) nonnull align 4 %params) #4
br label %cleanup.8
cleanup.8:
@glob = external global i32*
-declare void @bar(i32*, [20000 x i8]* byval)
+declare void @bar(i32*, [20000 x i8]* byval([20000 x i8]))
; CHECK-LABEL: foo:
; We should see the stack getting additional alignment
define void @foo([20000 x i8]* %addr) {
%tmp = alloca [4 x i32], align 32
%tmp0 = getelementptr [4 x i32], [4 x i32]* %tmp, i32 0, i32 0
- call void @bar(i32* %tmp0, [20000 x i8]* byval %addr)
+ call void @bar(i32* %tmp0, [20000 x i8]* byval([20000 x i8]) %addr)
ret void
}
; users of byval alignments > 4, so no real calls for ABI stability.
; "byval align 16" can't fit in any regs with an i8* taking up r0.
-define i32 @test_align16(i8*, [4 x i32]* byval align 16 %b) {
+define i32 @test_align16(i8*, [4 x i32]* byval([4 x i32]) align 16 %b) {
; CHECK-LABEL: test_align16:
; CHECK-NOT: sub sp
; CHECK: push {r4, r7, lr}
; byval align 8 can, but we used to incorrectly set r7 here (miscalculating the
; space taken up by arg regs).
-define i32 @test_align8(i8*, [4 x i32]* byval align 8 %b) {
+define i32 @test_align8(i8*, [4 x i32]* byval([4 x i32]) align 8 %b) {
; CHECK-LABEL: test_align8:
; CHECK: sub sp, #8
; CHECK: push {r4, r7, lr}
; "byval align 32" can't fit in regs no matter what: it would be misaligned
; unless the incoming stack was deliberately misaligned.
-define i32 @test_align32(i8*, [4 x i32]* byval align 32 %b) {
+define i32 @test_align32(i8*, [4 x i32]* byval([4 x i32]) align 32 %b) {
; CHECK-LABEL: test_align32:
; CHECK-NOT: sub sp
; CHECK: push {r4, r7, lr}
; While we're here, make sure the caller also puts it at sp
; CHECK: mov r[[BASE:[0-9]+]], sp
; CHECK: vst1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r[[BASE]]]
- call i32 @test_align16(i8* null, [4 x i32]* byval align 16 @var)
+ call i32 @test_align16(i8* null, [4 x i32]* byval([4 x i32]) align 16 @var)
ret void
}
; Function Attrs: nounwind ssp
define void @Client() #0 {
entry:
- tail call void @Logger(i8 signext 97, %struct.ModuleID* byval @sID) #2
+ tail call void @Logger(i8 signext 97, %struct.ModuleID* byval(%struct.ModuleID) @sID) #2
ret void
}
-declare void @Logger(i8 signext, %struct.ModuleID* byval) #1
+declare void @Logger(i8 signext, %struct.ModuleID* byval(%struct.ModuleID)) #1
attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
entry:
%0 = load i64, i64* %q, align 8
%sub = add nsw i64 %0, -1
- tail call arm_aapcscc void bitcast (void (...)* @r to void (%struct.anon*, %struct.anon*, i64)*)(%struct.anon* byval nonnull align 8 %p, %struct.anon* byval nonnull align 8 %p, i64 %sub)
+ tail call arm_aapcscc void bitcast (void (...)* @r to void (%struct.anon*, %struct.anon*, i64)*)(%struct.anon* byval(%struct.anon) nonnull align 8 %p, %struct.anon* byval(%struct.anon) nonnull align 8 %p, i64 %sub)
ret void
}
; RUN: llc < %s -frame-pointer=all -mcpu=cortex-a8 -mtriple arm-linux-gnu -target-abi=apcs -o - | FileCheck %s
; This test is fairly fragile. The goal is to ensure that "large" stack
-; objects are allocated closest to the stack protector (i.e., farthest away
+; objects are allocated closest to the stack protector (i.e., farthest away
; from the Stack Pointer.) In standard SSP mode this means that large (>=
; ssp-buffer-size) arrays and structures containing such arrays are
; closet to the protector. With sspstrong and sspreq this means large
%coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
%7 = bitcast [2 x i16]* %coerce.dive26 to i32*
%8 = load i32, i32* %7, align 1
- call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 4 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
+ call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval(%struct.struct_large_nonchar) align 4 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
ret void
}
; 136 large_char . arrays >= ssp-buffer-size
; 128 struct_large_char .
; 96 struct_large_nonchar .
-; 84+8 small_non_char | Group 2, nested arrays,
+; 84+8 small_non_char | Group 2, nested arrays,
; 90 small_char | arrays < ssp-buffer-size
; 88 struct_small_char |
; 84 struct_small_nonchar |
; 76 scalar1 + Group 4, everything else
; 72 scalar2 +
; 68 scalar3 +
-;
+;
; CHECK: layout_sspstrong:
; CHECK: bl get_scalar1
%coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
%7 = bitcast [2 x i16]* %coerce.dive26 to i32*
%8 = load i32, i32* %7, align 1
- call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 4 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
+ call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval(%struct.struct_large_nonchar) align 4 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
ret void
}
define void @layout_sspreq() sspreq {
entry:
; Expected stack layout for sspreq is the same as sspstrong
-;
+;
; CHECK: layout_sspreq:
; CHECK: bl get_scalar1
%coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
%7 = bitcast [2 x i16]* %coerce.dive26 to i32*
%8 = load i32, i32* %7, align 1
- call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 4 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
+ call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval(%struct.struct_large_nonchar) align 4 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
ret void
}
%coerce.dive5 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d2, i32 0, i32 0
%5 = bitcast [2 x i16]* %coerce.dive5 to i32*
%6 = load i32, i32* %5, align 1
- call void @takes_all(i64 %2, i16 %4, %struct.struct_large_nonchar* byval align 4 %d1, i32 %6, i8* null, i8* null, i32* null, i16* null, i32* null, i32 0, i32 0, i32 0)
+ call void @takes_all(i64 %2, i16 %4, %struct.struct_large_nonchar* byval(%struct.struct_large_nonchar) align 4 %d1, i32 %6, i8* null, i8* null, i32* null, i16* null, i32* null, i32 0, i32 0, i32 0)
ret void
}
declare signext i16 @get_struct_small_nonchar()
declare void @end_struct_small_nonchar()
-declare void @takes_all(i64, i16, %struct.struct_large_nonchar* byval align 8, i32, i8*, i8*, i32*, i16*, i32*, i32, i32, i32)
+declare void @takes_all(i64, i16, %struct.struct_large_nonchar* byval(%struct.struct_large_nonchar) align 8, i32, i8*, i8*, i32*, i16*, i32*, i32, i32, i32)
}
; Function Attrs: nounwind
-declare void @RestoreMVBlock8x8(i32, i32, %structN* byval nocapture, i32) #1
+declare void @RestoreMVBlock8x8(i32, i32, %structN* byval(%structN) nocapture, i32) #1
attributes #1 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
; CHECK: str
; CHECK-NOT:bne
%st = alloca %struct.SmallStruct, align 4
- %call = call i32 @e1(%struct.SmallStruct* byval %st)
+ %call = call i32 @e1(%struct.SmallStruct* byval(%struct.SmallStruct) %st)
ret i32 0
}
; NACL: str
; NACL: bne
%st = alloca %struct.LargeStruct, align 4
- %call = call i32 @e2(%struct.LargeStruct* byval %st)
+ %call = call i32 @e2(%struct.LargeStruct* byval(%struct.LargeStruct) %st)
ret i32 0
}
; NACL: vst1
; NACL: bne
%st = alloca %struct.LargeStruct, align 16
- %call = call i32 @e3(%struct.LargeStruct* byval align 16 %st)
+ %call = call i32 @e3(%struct.LargeStruct* byval(%struct.LargeStruct) align 16 %st)
ret i32 0
}
-declare i32 @e1(%struct.SmallStruct* nocapture byval %in) nounwind
-declare i32 @e2(%struct.LargeStruct* nocapture byval %in) nounwind
-declare i32 @e3(%struct.LargeStruct* nocapture byval align 16 %in) nounwind
+declare i32 @e1(%struct.SmallStruct* nocapture byval(%struct.SmallStruct) %in) nounwind
+declare i32 @e2(%struct.LargeStruct* nocapture byval(%struct.LargeStruct) %in) nounwind
+declare i32 @e3(%struct.LargeStruct* nocapture byval(%struct.LargeStruct) align 16 %in) nounwind
; rdar://12442472
; We can't do tail call since address of s is passed to the callee and part of
; s is in caller's local frame.
-define void @f3(%struct.SmallStruct* nocapture byval %s) nounwind optsize {
+define void @f3(%struct.SmallStruct* nocapture byval(%struct.SmallStruct) %s) nounwind optsize {
; CHECK-LABEL: f3
; CHECK: bl _consumestruct
entry:
ret void
}
-define void @f4(%struct.SmallStruct* nocapture byval %s) nounwind optsize {
+define void @f4(%struct.SmallStruct* nocapture byval(%struct.SmallStruct) %s) nounwind optsize {
; CHECK-LABEL: f4
; CHECK: bl _consumestruct
entry:
}
; We can do tail call here since s is in the incoming argument area.
-define void @f5(i32 %a, i32 %b, i32 %c, i32 %d, %struct.SmallStruct* nocapture byval %s) nounwind optsize {
+define void @f5(i32 %a, i32 %b, i32 %c, i32 %d, %struct.SmallStruct* nocapture byval(%struct.SmallStruct) %s) nounwind optsize {
; CHECK-LABEL: f5
; CHECK: b{{(\.w)?}} _consumestruct
entry:
ret void
}
-define void @f6(i32 %a, i32 %b, i32 %c, i32 %d, %struct.SmallStruct* nocapture byval %s) nounwind optsize {
+define void @f6(i32 %a, i32 %b, i32 %c, i32 %d, %struct.SmallStruct* nocapture byval(%struct.SmallStruct) %s) nounwind optsize {
; CHECK-LABEL: f6
; CHECK: b{{(\.w)?}} _consumestruct
entry:
; PR17309
%struct.I.8 = type { [10 x i32], [3 x i8] }
-declare void @use_I(%struct.I.8* byval)
+declare void @use_I(%struct.I.8* byval(%struct.I.8))
define void @test_I_16() {
; CHECK-LABEL: test_I_16
; CHECK: ldrb
; CHECK: strb
entry:
- call void @use_I(%struct.I.8* byval align 16 undef)
+ call void @use_I(%struct.I.8* byval(%struct.I.8) align 16 undef)
ret void
}
;cleanup if the number of bytes does not divide evenly by the store size
%struct.A = type <{ [ 10 x i32 ] }> ; 40 bytes
-declare void @use_A(%struct.A* byval)
+declare void @use_A(%struct.A* byval(%struct.A))
%struct.B = type <{ [ 10 x i32 ], i8 }> ; 41 bytes
-declare void @use_B(%struct.B* byval)
+declare void @use_B(%struct.B* byval(%struct.B))
%struct.C = type <{ [ 10 x i32 ], [ 3 x i8 ] }> ; 43 bytes
-declare void @use_C(%struct.C* byval)
+declare void @use_C(%struct.C* byval(%struct.C))
%struct.D = type <{ [ 100 x i32 ] }> ; 400 bytes
-declare void @use_D(%struct.D* byval)
+declare void @use_D(%struct.D* byval(%struct.D))
%struct.E = type <{ [ 100 x i32 ], i8 }> ; 401 bytes
-declare void @use_E(%struct.E* byval)
+declare void @use_E(%struct.E* byval(%struct.E))
%struct.F = type <{ [ 100 x i32 ], [ 3 x i8 ] }> ; 403 bytes
-declare void @use_F(%struct.F* byval)
+declare void @use_F(%struct.F* byval(%struct.F))
%struct.G = type { [ 10 x i32 ] } ; 40 bytes
-declare void @use_G(%struct.G* byval)
+declare void @use_G(%struct.G* byval(%struct.G))
%struct.H = type { [ 10 x i32 ], i8 } ; 41 bytes
-declare void @use_H(%struct.H* byval)
+declare void @use_H(%struct.H* byval(%struct.H))
%struct.I = type { [ 10 x i32 ], [ 3 x i8 ] } ; 43 bytes
-declare void @use_I(%struct.I* byval)
+declare void @use_I(%struct.I* byval(%struct.I))
%struct.J = type { [ 100 x i32 ] } ; 400 bytes
-declare void @use_J(%struct.J* byval)
+declare void @use_J(%struct.J* byval(%struct.J))
%struct.K = type { [ 100 x i32 ], i8 } ; 401 bytes
-declare void @use_K(%struct.K* byval)
+declare void @use_K(%struct.K* byval(%struct.K))
%struct.L = type { [ 100 x i32 ], [ 3 x i8 ] } ; 403 bytes
-declare void @use_L(%struct.L* byval)
+declare void @use_L(%struct.L* byval(%struct.L))
%struct.M = type { [ 64 x i8 ] } ; 64 bytes
-declare void @use_M(%struct.M* byval)
+declare void @use_M(%struct.M* byval(%struct.M))
%struct.N = type { [ 128 x i8 ] } ; 128 bytes
-declare void @use_N(%struct.N* byval)
+declare void @use_N(%struct.N* byval(%struct.N))
;ARM-LABEL: <test_A_1>:
;THUMB2-LABEL: <test_A_1>:
;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
entry:
%a = alloca %struct.A, align 1
- call void @use_A(%struct.A* byval align 1 %a)
+ call void @use_A(%struct.A* byval(%struct.A) align 1 %a)
ret void
}
;ARM-LABEL: <test_A_2>:
;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
entry:
%a = alloca %struct.A, align 2
- call void @use_A(%struct.A* byval align 2 %a)
+ call void @use_A(%struct.A* byval(%struct.A) align 2 %a)
ret void
}
;ARM-LABEL: <test_A_4>:
;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
entry:
%a = alloca %struct.A, align 4
- call void @use_A(%struct.A* byval align 4 %a)
+ call void @use_A(%struct.A* byval(%struct.A) align 4 %a)
ret void
}
;ARM-LABEL: <test_A_8>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.A, align 8
- call void @use_A(%struct.A* byval align 8 %a)
+ call void @use_A(%struct.A* byval(%struct.A) align 8 %a)
ret void
}
;ARM-LABEL: <test_A_16>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.A, align 16
- call void @use_A(%struct.A* byval align 16 %a)
+ call void @use_A(%struct.A* byval(%struct.A) align 16 %a)
ret void
}
;ARM-LABEL: <test_B_1>:
;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
entry:
%a = alloca %struct.B, align 1
- call void @use_B(%struct.B* byval align 1 %a)
+ call void @use_B(%struct.B* byval(%struct.B) align 1 %a)
ret void
}
;ARM-LABEL: <test_B_2>:
;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
entry:
%a = alloca %struct.B, align 2
- call void @use_B(%struct.B* byval align 2 %a)
+ call void @use_B(%struct.B* byval(%struct.B) align 2 %a)
ret void
}
;ARM-LABEL: <test_B_4>:
;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
entry:
%a = alloca %struct.B, align 4
- call void @use_B(%struct.B* byval align 4 %a)
+ call void @use_B(%struct.B* byval(%struct.B) align 4 %a)
ret void
}
;ARM-LABEL: <test_B_8>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.B, align 8
- call void @use_B(%struct.B* byval align 8 %a)
+ call void @use_B(%struct.B* byval(%struct.B) align 8 %a)
ret void
}
;ARM-LABEL: <test_B_16>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.B, align 16
- call void @use_B(%struct.B* byval align 16 %a)
+ call void @use_B(%struct.B* byval(%struct.B) align 16 %a)
ret void
}
;ARM-LABEL: <test_C_1>:
;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
entry:
%a = alloca %struct.C, align 1
- call void @use_C(%struct.C* byval align 1 %a)
+ call void @use_C(%struct.C* byval(%struct.C) align 1 %a)
ret void
}
;ARM-LABEL: <test_C_2>:
;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
entry:
%a = alloca %struct.C, align 2
- call void @use_C(%struct.C* byval align 2 %a)
+ call void @use_C(%struct.C* byval(%struct.C) align 2 %a)
ret void
}
;ARM-LABEL: <test_C_4>:
;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
entry:
%a = alloca %struct.C, align 4
- call void @use_C(%struct.C* byval align 4 %a)
+ call void @use_C(%struct.C* byval(%struct.C) align 4 %a)
ret void
}
;ARM-LABEL: <test_C_8>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.C, align 8
- call void @use_C(%struct.C* byval align 8 %a)
+ call void @use_C(%struct.C* byval(%struct.C) align 8 %a)
ret void
}
;ARM-LABEL: <test_C_16>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.C, align 16
- call void @use_C(%struct.C* byval align 16 %a)
+ call void @use_C(%struct.C* byval(%struct.C) align 16 %a)
ret void
}
;ARM-LABEL: <test_D_1>:
;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
entry:
%a = alloca %struct.D, align 1
- call void @use_D(%struct.D* byval align 1 %a)
+ call void @use_D(%struct.D* byval(%struct.D) align 1 %a)
ret void
}
;ARM-LABEL: <test_D_2>:
;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
entry:
%a = alloca %struct.D, align 2
- call void @use_D(%struct.D* byval align 2 %a)
+ call void @use_D(%struct.D* byval(%struct.D) align 2 %a)
ret void
}
;ARM-LABEL: <test_D_4>:
;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
entry:
%a = alloca %struct.D, align 4
- call void @use_D(%struct.D* byval align 4 %a)
+ call void @use_D(%struct.D* byval(%struct.D) align 4 %a)
ret void
}
;ARM-LABEL: <test_D_8>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.D, align 8
- call void @use_D(%struct.D* byval align 8 %a)
+ call void @use_D(%struct.D* byval(%struct.D) align 8 %a)
ret void
}
;ARM-LABEL: <test_D_16>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.D, align 16
- call void @use_D(%struct.D* byval align 16 %a)
+ call void @use_D(%struct.D* byval(%struct.D) align 16 %a)
ret void
}
;ARM-LABEL: <test_E_1>:
;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
entry:
%a = alloca %struct.E, align 1
- call void @use_E(%struct.E* byval align 1 %a)
+ call void @use_E(%struct.E* byval(%struct.E) align 1 %a)
ret void
}
;ARM-LABEL: <test_E_2>:
;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
entry:
%a = alloca %struct.E, align 2
- call void @use_E(%struct.E* byval align 2 %a)
+ call void @use_E(%struct.E* byval(%struct.E) align 2 %a)
ret void
}
;ARM-LABEL: <test_E_4>:
;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
entry:
%a = alloca %struct.E, align 4
- call void @use_E(%struct.E* byval align 4 %a)
+ call void @use_E(%struct.E* byval(%struct.E) align 4 %a)
ret void
}
;ARM-LABEL: <test_E_8>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.E, align 8
- call void @use_E(%struct.E* byval align 8 %a)
+ call void @use_E(%struct.E* byval(%struct.E) align 8 %a)
ret void
}
;ARM-LABEL: <test_E_16>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.E, align 16
- call void @use_E(%struct.E* byval align 16 %a)
+ call void @use_E(%struct.E* byval(%struct.E) align 16 %a)
ret void
}
;ARM-LABEL: <test_F_1>:
;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
entry:
%a = alloca %struct.F, align 1
- call void @use_F(%struct.F* byval align 1 %a)
+ call void @use_F(%struct.F* byval(%struct.F) align 1 %a)
ret void
}
;ARM-LABEL: <test_F_2>:
;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
entry:
%a = alloca %struct.F, align 2
- call void @use_F(%struct.F* byval align 2 %a)
+ call void @use_F(%struct.F* byval(%struct.F) align 2 %a)
ret void
}
;ARM-LABEL: <test_F_4>:
;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
entry:
%a = alloca %struct.F, align 4
- call void @use_F(%struct.F* byval align 4 %a)
+ call void @use_F(%struct.F* byval(%struct.F) align 4 %a)
ret void
}
;ARM-LABEL: <test_F_8>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.F, align 8
- call void @use_F(%struct.F* byval align 8 %a)
+ call void @use_F(%struct.F* byval(%struct.F) align 8 %a)
ret void
}
;ARM-LABEL: <test_F_16>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.F, align 16
- call void @use_F(%struct.F* byval align 16 %a)
+ call void @use_F(%struct.F* byval(%struct.F) align 16 %a)
ret void
}
;ARM-LABEL: <test_G_1>:
;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
entry:
%a = alloca %struct.G, align 1
- call void @use_G(%struct.G* byval align 1 %a)
+ call void @use_G(%struct.G* byval(%struct.G) align 1 %a)
ret void
}
;ARM-LABEL: <test_G_2>:
;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
entry:
%a = alloca %struct.G, align 2
- call void @use_G(%struct.G* byval align 2 %a)
+ call void @use_G(%struct.G* byval(%struct.G) align 2 %a)
ret void
}
;ARM-LABEL: <test_G_4>:
;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
entry:
%a = alloca %struct.G, align 4
- call void @use_G(%struct.G* byval align 4 %a)
+ call void @use_G(%struct.G* byval(%struct.G) align 4 %a)
ret void
}
;ARM-LABEL: <test_G_8>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.G, align 8
- call void @use_G(%struct.G* byval align 8 %a)
+ call void @use_G(%struct.G* byval(%struct.G) align 8 %a)
ret void
}
;ARM-LABEL: <test_G_16>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.G, align 16
- call void @use_G(%struct.G* byval align 16 %a)
+ call void @use_G(%struct.G* byval(%struct.G) align 16 %a)
ret void
}
;ARM-LABEL: <test_H_1>:
;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
entry:
%a = alloca %struct.H, align 1
- call void @use_H(%struct.H* byval align 1 %a)
+ call void @use_H(%struct.H* byval(%struct.H) align 1 %a)
ret void
}
;ARM-LABEL: <test_H_2>:
;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
entry:
%a = alloca %struct.H, align 2
- call void @use_H(%struct.H* byval align 2 %a)
+ call void @use_H(%struct.H* byval(%struct.H) align 2 %a)
ret void
}
;ARM-LABEL: <test_H_4>:
;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
entry:
%a = alloca %struct.H, align 4
- call void @use_H(%struct.H* byval align 4 %a)
+ call void @use_H(%struct.H* byval(%struct.H) align 4 %a)
ret void
}
;ARM-LABEL: <test_H_8>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.H, align 8
- call void @use_H(%struct.H* byval align 8 %a)
+ call void @use_H(%struct.H* byval(%struct.H) align 8 %a)
ret void
}
;ARM-LABEL: <test_H_16>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.H, align 16
- call void @use_H(%struct.H* byval align 16 %a)
+ call void @use_H(%struct.H* byval(%struct.H) align 16 %a)
ret void
}
;ARM-LABEL: <test_I_1>:
;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
entry:
%a = alloca %struct.I, align 1
- call void @use_I(%struct.I* byval align 1 %a)
+ call void @use_I(%struct.I* byval(%struct.I) align 1 %a)
ret void
}
;ARM-LABEL: <test_I_2>:
;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
entry:
%a = alloca %struct.I, align 2
- call void @use_I(%struct.I* byval align 2 %a)
+ call void @use_I(%struct.I* byval(%struct.I) align 2 %a)
ret void
}
;ARM-LABEL: <test_I_4>:
;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
entry:
%a = alloca %struct.I, align 4
- call void @use_I(%struct.I* byval align 4 %a)
+ call void @use_I(%struct.I* byval(%struct.I) align 4 %a)
ret void
}
;ARM-LABEL: <test_I_8>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.I, align 8
- call void @use_I(%struct.I* byval align 8 %a)
+ call void @use_I(%struct.I* byval(%struct.I) align 8 %a)
ret void
}
;ARM-LABEL: <test_I_16>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.I, align 16
- call void @use_I(%struct.I* byval align 16 %a)
+ call void @use_I(%struct.I* byval(%struct.I) align 16 %a)
ret void
}
;ARM-LABEL: <test_J_1>:
;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
entry:
%a = alloca %struct.J, align 1
- call void @use_J(%struct.J* byval align 1 %a)
+ call void @use_J(%struct.J* byval(%struct.J) align 1 %a)
ret void
}
;ARM-LABEL: <test_J_2>:
;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
entry:
%a = alloca %struct.J, align 2
- call void @use_J(%struct.J* byval align 2 %a)
+ call void @use_J(%struct.J* byval(%struct.J) align 2 %a)
ret void
}
;ARM-LABEL: <test_J_4>:
;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
entry:
%a = alloca %struct.J, align 4
- call void @use_J(%struct.J* byval align 4 %a)
+ call void @use_J(%struct.J* byval(%struct.J) align 4 %a)
ret void
}
;ARM-LABEL: <test_J_8>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.J, align 8
- call void @use_J(%struct.J* byval align 8 %a)
+ call void @use_J(%struct.J* byval(%struct.J) align 8 %a)
ret void
}
;ARM-LABEL: <test_J_16>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.J, align 16
- call void @use_J(%struct.J* byval align 16 %a)
+ call void @use_J(%struct.J* byval(%struct.J) align 16 %a)
ret void
}
;ARM-LABEL: <test_K_1>:
;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
entry:
%a = alloca %struct.K, align 1
- call void @use_K(%struct.K* byval align 1 %a)
+ call void @use_K(%struct.K* byval(%struct.K) align 1 %a)
ret void
}
;ARM-LABEL: <test_K_2>:
;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
entry:
%a = alloca %struct.K, align 2
- call void @use_K(%struct.K* byval align 2 %a)
+ call void @use_K(%struct.K* byval(%struct.K) align 2 %a)
ret void
}
;ARM-LABEL: <test_K_4>:
;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
entry:
%a = alloca %struct.K, align 4
- call void @use_K(%struct.K* byval align 4 %a)
+ call void @use_K(%struct.K* byval(%struct.K) align 4 %a)
ret void
}
;ARM-LABEL: <test_K_8>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.K, align 8
- call void @use_K(%struct.K* byval align 8 %a)
+ call void @use_K(%struct.K* byval(%struct.K) align 8 %a)
ret void
}
;ARM-LABEL: <test_K_16>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.K, align 16
- call void @use_K(%struct.K* byval align 16 %a)
+ call void @use_K(%struct.K* byval(%struct.K) align 16 %a)
ret void
}
;ARM-LABEL: <test_L_1>:
;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
entry:
%a = alloca %struct.L, align 1
- call void @use_L(%struct.L* byval align 1 %a)
+ call void @use_L(%struct.L* byval(%struct.L) align 1 %a)
ret void
}
;ARM-LABEL: <test_L_2>:
;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
entry:
%a = alloca %struct.L, align 2
- call void @use_L(%struct.L* byval align 2 %a)
+ call void @use_L(%struct.L* byval(%struct.L) align 2 %a)
ret void
}
;ARM-LABEL: <test_L_4>:
;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
entry:
%a = alloca %struct.L, align 4
- call void @use_L(%struct.L* byval align 4 %a)
+ call void @use_L(%struct.L* byval(%struct.L) align 4 %a)
ret void
}
;ARM-LABEL: <test_L_8>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.L, align 8
- call void @use_L(%struct.L* byval align 8 %a)
+ call void @use_L(%struct.L* byval(%struct.L) align 8 %a)
ret void
}
;ARM-LABEL: <test_L_16>:
;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [{{.*}}]!
entry:
%a = alloca %struct.L, align 16
- call void @use_L(%struct.L* byval align 16 %a)
+ call void @use_L(%struct.L* byval(%struct.L) align 16 %a)
ret void
}
;V8MBASE-LABEL: <test_M>:
;V8MBASE-NOT: movw
entry:
%a = alloca %struct.M, align 1
- call void @use_M(%struct.M* byval align 1 %a)
+ call void @use_M(%struct.M* byval(%struct.M) align 1 %a)
ret void
}
;V8MBASE-LABEL: <test_N>:
;V8MBASE-NOT: b #{{[0-9]+}}
entry:
%a = alloca %struct.N, align 1
- call void @use_N(%struct.N* byval align 1 %a)
+ call void @use_N(%struct.N* byval(%struct.N) align 1 %a)
ret void
}
%arrayinit.start = getelementptr inbounds %struct.S, %struct.S* %.compoundliteral, i64 0, i32 0, i64 3
%scevgep4 = bitcast i32* %arrayinit.start to i8*
call void @llvm.memset.p0i8.i64(i8* align 4 %scevgep4, i8 0, i64 28, i1 false)
- call void @foo(i32 %a, %struct.S* byval align 8 %.compoundliteral) #3
+ call void @foo(i32 %a, %struct.S* byval(%struct.S) align 8 %.compoundliteral) #3
ret void
}
-declare void @foo(i32, %struct.S* byval align 8) #1
+declare void @foo(i32, %struct.S* byval(%struct.S) align 8) #1
; Function Attrs: nounwind
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #3
%big = type [131072 x i8]
-declare void @foo(%big* byval align 1)
+declare void @foo(%big* byval(%big) align 1)
-define void @bar(%big* byval align 1 %x) {
- call void @foo(%big* byval align 1 %x)
+define void @bar(%big* byval(%big) align 1 %x) {
+ call void @foo(%big* byval(%big) align 1 %x)
ret void
}
%struct.t0 = type { i32 }
-define i32 @foo(%struct.t0* byval align 8 %s, i8 zeroext %t, i8 %u) #0 {
+define i32 @foo(%struct.t0* byval(%struct.t0) align 8 %s, i8 zeroext %t, i8 %u) #0 {
%a = zext i8 %u to i32
ret i32 %a
}
; CHECK-TWO: memw(r29+#52) = r2
; CHECK-THREE: memw(r29+#56) = r2
-define void @f0(%s.0* noalias nocapture sret %a0, i32 %a1, i8 zeroext %a2, %s.0* byval nocapture readnone align 8 %a3, %s.1* byval nocapture readnone align 8 %a4) #0 {
+define void @f0(%s.0* noalias nocapture sret %a0, i32 %a1, i8 zeroext %a2, %s.0* byval(%s.0) nocapture readnone align 8 %a3, %s.1* byval(%s.1) nocapture readnone align 8 %a4) #0 {
b0:
%v0 = alloca %s.0, align 8
%v1 = load %s.0*, %s.0** @g0, align 4
%v3 = add nsw i64 %v2, 1
%v4 = add nsw i32 %a1, 2
%v5 = add nsw i64 %v2, 3
- call void @f1(%s.0* sret %v0, i32 45, %s.0* byval align 8 %v1, %s.0* byval align 8 %v1, i8 zeroext %a2, i64 %v3, i32 %v4, i64 %v5, i8 zeroext %a2, i8 zeroext %a2, i8 zeroext %a2, i32 45)
+ call void @f1(%s.0* sret %v0, i32 45, %s.0* byval(%s.0) align 8 %v1, %s.0* byval(%s.0) align 8 %v1, i8 zeroext %a2, i64 %v3, i32 %v4, i64 %v5, i8 zeroext %a2, i8 zeroext %a2, i8 zeroext %a2, i32 45)
%v6 = bitcast %s.0* %v0 to i32*
store i32 20, i32* %v6, align 8
%v7 = bitcast %s.0* %a0 to i8*
ret void
}
-declare void @f1(%s.0* sret, i32, %s.0* byval align 8, %s.0* byval align 8, i8 zeroext, i64, i32, i64, i8 zeroext, i8 zeroext, i8 zeroext, i32)
+declare void @f1(%s.0* sret, i32, %s.0* byval(%s.0) align 8, %s.0* byval(%s.0) align 8, i8 zeroext, i64, i32, i64, i8 zeroext, i8 zeroext, i8 zeroext, i32)
; Function Attrs: argmemonly nounwind
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #1
%s.9 = type { i8, i8 }
; Function Attrs: nounwind optsize
- define dso_local void @f0(%s.0* byval nocapture readonly align 8 %a0) local_unnamed_addr #0 {
+ define dso_local void @f0(%s.0* byval(%s.0) nocapture readonly align 8 %a0) local_unnamed_addr #0 {
b0:
%v0 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 10
%v1 = load i8, i8* %v0, align 8
%s.1 = type { %s.2 }
%s.2 = type { i32, i8* }
-define void @f0(%s.0* byval align 8 %a0) {
+define void @f0(%s.0* byval(%s.0) align 8 %a0) {
b0:
call void asm sideeffect ".weak OFFSET_0;jump ##(OFFSET_0 + 0x14c15f0)", "*r"(%s.0* nonnull %a0), !srcloc !0
ret void
define void @foo() nounwind {
entry:
- call void @bar(%struct.large* byval @s2)
+ call void @bar(%struct.large* byval(%struct.large) @s2)
ret void
}
-declare void @bar(%struct.large* byval)
+declare void @bar(%struct.large* byval(%struct.large))
declare hidden fastcc void @foo(%struct.0* noalias nocapture, i8 signext, i8 zeroext, i32, i64, i64) unnamed_addr #0
-define void @fred(%struct.0* noalias nocapture sret %agg.result, %struct.1* byval nocapture readonly align 8 %a, i32 %a0) #1 {
+define void @fred(%struct.0* noalias nocapture sret %agg.result, %struct.1* byval(%struct.1) nocapture readonly align 8 %a, i32 %a0) #1 {
entry:
%0 = load i64, i64* undef, align 8
switch i32 %a0, label %if.else [
declare i32 @f0(i8* nocapture, ...) #0
; Function Attrs: nounwind
-define void @f1(%s.0* byval %a0, <16 x i32> %a1) #0 {
+define void @f1(%s.0* byval(%s.0) %a0, <16 x i32> %a1) #0 {
b0:
%v0 = alloca <16 x i32>, align 64
store <16 x i32> %a1, <16 x i32>* %v0, align 64, !tbaa !0
define i32 @f2() #0 {
b0:
%v0 = load <16 x i32>, <16 x i32>* @g2, align 64, !tbaa !0
- tail call void @f1(%s.0* byval @g1, <16 x i32> %v0)
+ tail call void @f1(%s.0* byval(%s.0) @g1, <16 x i32> %v0)
ret i32 0
}
; Function Attrs: nounwind
define i32 @main() #0 {
entry:
- %call = tail call i32 (i32, ...) @foo(i32 undef, i32 2, %struct.AAA* byval align 4 @aaa, i32 4)
+ %call = tail call i32 (i32, ...) @foo(i32 undef, i32 2, %struct.AAA* byval(%struct.AAA) align 4 @aaa, i32 4)
%call1 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str, i32 0, i32 0), i32 %call) #1
ret i32 %call
}
@.str = private unnamed_addr constant [13 x i8] c"result = %d\0A\00", align 1
; Function Attrs: nounwind
-define i32 @foo(i32 %xx, %struct.BBB* byval align 8 %eee, ...) #0 {
+define i32 @foo(i32 %xx, %struct.BBB* byval(%struct.BBB) align 8 %eee, ...) #0 {
entry:
%xx.addr = alloca i32, align 4
%ap = alloca [1 x %struct.__va_list_tag], align 8
store i32 0, i32* %retval
store i64 1000000, i64* %m, align 8
%0 = load i64, i64* %m, align 8
- %call = call i32 (i32, %struct.BBB*, ...) @foo(i32 1, %struct.BBB* byval align 8 bitcast ({ i8, i64, i32, [4 x i8] }* @ddd to %struct.BBB*), i64 %0, %struct.AAA* byval align 4 @aaa, i32 4)
+ %call = call i32 (i32, %struct.BBB*, ...) @foo(i32 1, %struct.BBB* byval(%struct.BBB) align 8 bitcast ({ i8, i64, i32, [4 x i8] }* @ddd to %struct.BBB*), i64 %0, %struct.AAA* byval(%struct.AAA) align 4 @aaa, i32 4)
store i32 %call, i32* %x, align 4
%1 = load i32, i32* %x, align 4
%call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str, i32 0, i32 0), i32 %1)
store i64 1000000, i64* %y, align 8
%0 = load i64, i64* %y, align 8
%1 = load i64, i64* %y, align 8
- %call = call i32 (i32, i32, i32, i32, i32, ...) @foo(i32 1, i32 2, i32 3, i32 4, i32 5, i64 %0, %struct.AAA* byval align 4 @aaa, i32 4, i64 %1)
+ %call = call i32 (i32, i32, i32, i32, i32, ...) @foo(i32 1, i32 2, i32 3, i32 4, i32 5, i64 %0, %struct.AAA* byval(%struct.AAA) align 4 @aaa, i32 4, i64 %1)
store i32 %call, i32* %x, align 4
%2 = load i32, i32* %x, align 4
%call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str, i32 0, i32 0), i32 %2)
@.str = private unnamed_addr constant [13 x i8] c"result = %d\0A\00", align 1
; Function Attrs: nounwind
-define i32 @foo(i32 %xx, i32 %z, i32 %m, %struct.AAA* byval align 4 %bbb, %struct.AAA* byval align 4 %GGG, ...) #0 {
+define i32 @foo(i32 %xx, i32 %z, i32 %m, %struct.AAA* byval(%struct.AAA) align 4 %bbb, %struct.AAA* byval(%struct.AAA) align 4 %GGG, ...) #0 {
entry:
%xx.addr = alloca i32, align 4
%z.addr = alloca i32, align 4
%retval = alloca i32, align 4
%x = alloca i32, align 4
store i32 0, i32* %retval
- %call = call i32 (i32, i32, i32, %struct.AAA*, %struct.AAA*, ...) @foo(i32 1, i32 3, i32 5, %struct.AAA* byval align 4 @aaa, %struct.AAA* byval align 4 @fff, i32 2, %struct.AAA* byval align 4 @xxx, %struct.AAA* byval align 4 @yyy, %struct.AAA* byval align 4 @ccc, i32 4)
+ %call = call i32 (i32, i32, i32, %struct.AAA*, %struct.AAA*, ...) @foo(i32 1, i32 3, i32 5, %struct.AAA* byval(%struct.AAA) align 4 @aaa, %struct.AAA* byval(%struct.AAA) align 4 @fff, i32 2, %struct.AAA* byval(%struct.AAA) align 4 @xxx, %struct.AAA* byval(%struct.AAA) align 4 @yyy, %struct.AAA* byval(%struct.AAA) align 4 @ccc, i32 4)
store i32 %call, i32* %x, align 4
%0 = load i32, i32* %x, align 4
%call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str, i32 0, i32 0), i32 %0)
declare void @llvm.dbg.declare(metadata, metadata, metadata) #0
- define hidden void @foo(i32* byval %dstRect) {
+ define hidden void @foo(i32* byval(i32) %dstRect) {
entry:
call void @llvm.dbg.declare(metadata i32* %dstRect, metadata !3, metadata !DIExpression()), !dbg !5
unreachable
%struct.Foo = type { i16, i16, i16 }
@foo = global %struct.Foo { i16 1, i16 2, i16 3 }, align 2
-define i16 @callee(%struct.Foo* byval %f) nounwind {
+define i16 @callee(%struct.Foo* byval(%struct.Foo) %f) nounwind {
entry:
; CHECK-LABEL: callee:
; CHECK: mov 2(r1), r12
; CHECK: mov &foo+4, 4(r1)
; CHECK-NEXT: mov &foo+2, 2(r1)
; CHECK-NEXT: mov &foo, 0(r1)
- %call = call i16 @callee(%struct.Foo* byval @foo)
+ %call = call i16 @callee(%struct.Foo* byval(%struct.Foo) @foo)
ret void
}
%VeryLarge = type { i8, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
; intentionally cause a spill
-define void @inc(%VeryLarge* byval align 1 %s) {
+define void @inc(%VeryLarge* byval(%VeryLarge) align 1 %s) {
entry:
%p0 = getelementptr inbounds %VeryLarge, %VeryLarge* %s, i32 0, i32 0
%0 = load i8, i8* %p0
; N64-NEXT: daddu $sp, $sp, $1
entry:
%a = alloca %struct.S1, align 4
- call void @f2(%struct.S1* byval align 4 %a)
+ call void @f2(%struct.S1* byval(%struct.S1) align 4 %a)
ret void
}
-declare dso_local void @f2(%struct.S1* byval align 4) #1
+declare dso_local void @f2(%struct.S1* byval(%struct.S1) align 4) #1
; O32-SDAG-LABEL: Initial selection DAG: %bb.0 'g2:entry'
; O32-SDAG: t{{.*}}: ch,glue = callseq_start t{{.*}}, TargetConstant:i32<{{.*}}>
%1 = bitcast %struct.S1* %byval-temp to i8*
%2 = bitcast %struct.S1* %0 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %1, i8* align 1 %2, i32 65520, i1 false)
- call void @f2(%struct.S1* byval align 4 %byval-temp)
+ call void @f2(%struct.S1* byval(%struct.S1) align 4 %byval-temp)
ret void
}
define void @foo2() nounwind {
entry:
%s = alloca %struct.S, align 4
- call void @foo1(%struct.S* byval %s)
+ call void @foo1(%struct.S* byval(%struct.S) %s)
ret void
}
-declare void @foo1(%struct.S* byval)
+declare void @foo1(%struct.S* byval(%struct.S))
%struct.str = type { i32, i32, [3 x i32*] }
-declare fastcc void @_Z1F3str(%struct.str* noalias nocapture sret %agg.result, %struct.str* byval nocapture readonly align 4 %s)
+declare fastcc void @_Z1F3str(%struct.str* noalias nocapture sret %agg.result, %struct.str* byval(%struct.str) nocapture readonly align 4 %s)
-define i32 @_Z1g3str(%struct.str* byval nocapture readonly align 4 %s) {
+define i32 @_Z1g3str(%struct.str* byval(%struct.str) nocapture readonly align 4 %s) {
; CHECK-LABEL: _Z1g3str:
; CHECK: sw $7, [[OFFSET:[0-9]+]]($sp)
; CHECK: lw ${{[0-9]+}}, [[OFFSET]]($sp)
%ref.tmp = alloca %struct.str, align 4
%0 = bitcast %struct.str* %ref.tmp to i8*
call void @llvm.lifetime.start.p0i8(i64 20, i8* nonnull %0)
- call fastcc void @_Z1F3str(%struct.str* nonnull sret %ref.tmp, %struct.str* byval nonnull align 4 %s)
+ call fastcc void @_Z1F3str(%struct.str* nonnull sret %ref.tmp, %struct.str* byval(%struct.str) nonnull align 4 %s)
%cl.sroa.3.0..sroa_idx2 = getelementptr inbounds %struct.str, %struct.str* %ref.tmp, i32 0, i32 1
%cl.sroa.3.0.copyload = load i32, i32* %cl.sroa.3.0..sroa_idx2, align 4
call void @llvm.lifetime.end.p0i8(i64 20, i8* nonnull %0)
%agg.tmp = alloca %struct.S1, align 1
%tmp = getelementptr inbounds %struct.S1, %struct.S1* %agg.tmp, i32 0, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %tmp, i8* align 1 getelementptr inbounds (%struct.S1, %struct.S1* @s1, i32 0, i32 0, i32 0), i32 65536, i1 false)
- call void @f2(%struct.S1* byval %agg.tmp) nounwind
+ call void @f2(%struct.S1* byval(%struct.S1) %agg.tmp) nounwind
ret void
}
-declare void @f2(%struct.S1* byval)
+declare void @f2(%struct.S1* byval(%struct.S1))
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
; MIPS64R6: ld $[[SPTR:[0-9]+]], %got_disp(arr)(
- tail call void @extern_func([7 x i8]* byval @arr) nounwind
+ tail call void @extern_func([7 x i8]* byval([7 x i8]) @arr) nounwind
ret void
}
-declare void @extern_func([7 x i8]* byval)
+declare void @extern_func([7 x i8]* byval([7 x i8]))
; CHECK-NEXT: addiu $sp, $sp, 64
entry:
%agg.tmp10 = alloca %struct.S3, align 4
- call void @callee1(float 2.000000e+01, %struct.S1* byval bitcast (%0* @f1.s1 to %struct.S1*)) nounwind
- call void @callee2(%struct.S2* byval @f1.s2) nounwind
+ call void @callee1(float 2.000000e+01, %struct.S1* byval(%struct.S1) bitcast (%0* @f1.s1 to %struct.S1*)) nounwind
+ call void @callee2(%struct.S2* byval(%struct.S2) @f1.s2) nounwind
%tmp11 = getelementptr inbounds %struct.S3, %struct.S3* %agg.tmp10, i32 0, i32 0
store i8 11, i8* %tmp11, align 4
- call void @callee3(float 2.100000e+01, %struct.S3* byval %agg.tmp10, %struct.S1* byval bitcast (%0* @f1.s1 to %struct.S1*)) nounwind
+ call void @callee3(float 2.100000e+01, %struct.S3* byval(%struct.S3) %agg.tmp10, %struct.S1* byval(%struct.S1) bitcast (%0* @f1.s1 to %struct.S1*)) nounwind
ret void
}
-declare void @callee1(float, %struct.S1* byval)
+declare void @callee1(float, %struct.S1* byval(%struct.S1))
-declare void @callee2(%struct.S2* byval)
+declare void @callee2(%struct.S2* byval(%struct.S2))
-declare void @callee3(float, %struct.S3* byval, %struct.S1* byval)
+declare void @callee3(float, %struct.S3* byval(%struct.S3), %struct.S1* byval(%struct.S1))
-define void @f2(float %f, %struct.S1* nocapture byval %s1) nounwind {
+define void @f2(float %f, %struct.S1* nocapture byval(%struct.S1) %s1) nounwind {
; CHECK-LABEL: f2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lui $2, %hi(_gp_disp)
declare void @callee4(i32, double, i64, i32, i16 signext, i8 signext, float)
-define void @f3(%struct.S2* nocapture byval %s2) nounwind {
+define void @f3(%struct.S2* nocapture byval(%struct.S2) %s2) nounwind {
; CHECK-LABEL: f3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lui $2, %hi(_gp_disp)
ret void
}
-define void @f4(float %f, %struct.S3* nocapture byval %s3, %struct.S1* nocapture byval %s1) nounwind {
+define void @f4(float %f, %struct.S3* nocapture byval(%struct.S3) %s3, %struct.S1* nocapture byval(%struct.S1) %s1) nounwind {
; CHECK-LABEL: f4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lui $2, %hi(_gp_disp)
%struct.S4 = type { [4 x i32] }
-define void @f5(i64 %a0, %struct.S4* nocapture byval %a1) nounwind {
+define void @f5(i64 %a0, %struct.S4* nocapture byval(%struct.S4) %a1) nounwind {
; CHECK-LABEL: f5:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lui $2, %hi(_gp_disp)
; CHECK-NEXT: jr $ra
; CHECK-NEXT: addiu $sp, $sp, 32
entry:
- tail call void @f6(%struct.S4* byval %a1, i64 %a0) nounwind
+ tail call void @f6(%struct.S4* byval(%struct.S4) %a1, i64 %a0) nounwind
ret void
}
-declare void @f6(%struct.S4* nocapture byval, i64)
+declare void @f6(%struct.S4* nocapture byval(%struct.S4), i64)
@gs1 = external global %struct.S
-declare i32 @callee9(%struct.S* byval)
+declare i32 @callee9(%struct.S* byval(%struct.S))
define i32 @caller9_0() nounwind {
entry:
; PIC64R6: jalrc $25
; PIC16: jalrc
- %call = tail call i32 @callee9(%struct.S* byval @gs1) nounwind
+ %call = tail call i32 @callee9(%struct.S* byval(%struct.S) @gs1) nounwind
ret i32 %call
}
ret i32 %call
}
-declare i32 @callee11(%struct.S* byval)
+declare i32 @callee11(%struct.S* byval(%struct.S))
define i32 @caller11() nounwind noinline {
entry:
; PIC64R6: jalrc $25
; PIC16: jalrc
- %call = tail call i32 @callee11(%struct.S* byval @gs1) nounwind
+ %call = tail call i32 @callee11(%struct.S* byval(%struct.S) @gs1) nounwind
ret i32 %call
}
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
-define i32 @caller12(%struct.S* nocapture byval %a0) nounwind {
+define i32 @caller12(%struct.S* nocapture byval(%struct.S) %a0) nounwind {
entry:
; ALL-LABEL: caller12:
; PIC32: jalr $25
; MIPS32R6-DAG: lhu $[[PART1:[0-9]+]], 2($[[R0]])
- tail call void @foo2(%struct.S1* byval getelementptr inbounds (%struct.S2, %struct.S2* @s2, i32 0, i32 1)) nounwind
+ tail call void @foo2(%struct.S1* byval(%struct.S1) getelementptr inbounds (%struct.S2, %struct.S2* @s2, i32 0, i32 1)) nounwind
ret void
}
; MIPS32R6-EB-DAG: sll $[[T3:[0-9]+]], $[[T1]], 8
; MIPS32R6-EB-DAG: or $5, $[[T2]], $[[T3]]
- tail call void @foo4(%struct.S4* byval @s4) nounwind
+ tail call void @foo4(%struct.S4* byval(%struct.S4) @s4) nounwind
ret void
}
-declare void @foo2(%struct.S1* byval)
+declare void @foo2(%struct.S1* byval(%struct.S1))
-declare void @foo4(%struct.S4* byval)
+declare void @foo4(%struct.S4* byval(%struct.S4))
%struct.S = type { i32, i32 }
; Function Attrs: nounwind
-define void @_Z11TakesStruct1SPi(%struct.S* byval nocapture readonly %input, i32* nocapture %output) #0 {
+define void @_Z11TakesStruct1SPi(%struct.S* byval(%struct.S) nocapture readonly %input, i32* nocapture %output) #0 {
entry:
; CHECK-LABEL: @_Z11TakesStruct1SPi
; PTX-LABEL: .visible .entry _Z11TakesStruct1SPi(
%struct.S = type { i32*, i32* }
-define void @ptr_in_byval_kernel(%struct.S* byval %input, i32* %output) {
+define void @ptr_in_byval_kernel(%struct.S* byval(%struct.S) %input, i32* %output) {
; CHECK-LABEL: .visible .entry ptr_in_byval_kernel(
; CHECK: ld.param.u64 %[[optr:rd.*]], [ptr_in_byval_kernel_param_1]
; CHECK: cvta.to.global.u64 %[[optr_g:.*]], %[[optr]];
; Regular functions lower byval arguments differently. We need to make
; sure that we're loading byval argument data using [symbol+offset].
; There's also no assumption that all pointers within are in global space.
-define void @ptr_in_byval_func(%struct.S* byval %input, i32* %output) {
+define void @ptr_in_byval_func(%struct.S* byval(%struct.S) %input, i32* %output) {
; CHECK-LABEL: .visible .func ptr_in_byval_func(
; CHECK: ld.param.u64 %[[optr:rd.*]], [ptr_in_byval_func_param_1]
; CHECK: ld.param.u64 %[[iptr:rd.*]], [ptr_in_byval_func_param_0+8]
; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
;;; Need 4-byte alignment on float* passed byval
-define ptx_device void @t1(float* byval %x) {
+define ptx_device void @t1(float* byval(float) %x) {
; CHECK: .func t1
; CHECK: .param .align 4 .b8 t1_param_0[4]
ret void
;;; Need 8-byte alignment on double* passed byval
-define ptx_device void @t2(double* byval %x) {
+define ptx_device void @t2(double* byval(double) %x) {
; CHECK: .func t2
; CHECK: .param .align 8 .b8 t2_param_0[8]
ret void
;;; Need 4-byte alignment on float2* passed byval
%struct.float2 = type { float, float }
-define ptx_device void @t3(%struct.float2* byval %x) {
+define ptx_device void @t3(%struct.float2* byval(%struct.float2) %x) {
; CHECK: .func t3
; CHECK: .param .align 4 .b8 t3_param_0[8]
ret void
;;; Need at least 4-byte alignment in order to avoid miscompilation by
;;; ptxas for sm_50+
-define ptx_device void @t4(i8* byval %x) {
+define ptx_device void @t4(i8* byval(i8) %x) {
; CHECK: .func t4
; CHECK: .param .align 4 .b8 t4_param_0[1]
ret void
}
;;; Make sure we adjust alignment at the call site as well.
-define ptx_device void @t5(i8* align 2 byval %x) {
+define ptx_device void @t5(i8* align 2 byval(i8) %x) {
; CHECK: .func t5
; CHECK: .param .align 4 .b8 t5_param_0[1]
; CHECK: {
; CHECK: .param .align 4 .b8 param0[1];
; CHECK: call.uni
- call void @t4(i8* byval %x)
+ call void @t4(i8* byval(i8) %x)
ret void
}
; void llvm::MachineMemOperand::refineAlignment(const llvm::MachineMemOperand*):
; Assertion `MMO->getFlags() == getFlags() && "Flags mismatch !"' failed.
-declare void @_Z3fn11F(%class.F* byval align 8) local_unnamed_addr
+declare void @_Z3fn11F(%class.F* byval(%class.F) align 8) local_unnamed_addr
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1)
declare signext i32 @_ZN1F11isGlobalRegEv(%class.F*) local_unnamed_addr
declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
%class.F = type { i32, i64, i8, [64 x i8], i8, i32* }
-define signext i32 @_Z29EmitOMPAtomicSimpleUpdateExpr1F(%class.F* byval align 8 %p1) local_unnamed_addr {
+define signext i32 @_Z29EmitOMPAtomicSimpleUpdateExpr1F(%class.F* byval(%class.F) align 8 %p1) local_unnamed_addr {
entry:
- call void @_Z3fn11F(%class.F* byval nonnull align 8 %p1)
+ call void @_Z3fn11F(%class.F* byval(%class.F) nonnull align 8 %p1)
%call = call signext i32 @_ZN1F11isGlobalRegEv(%class.F* nonnull %p1)
ret i32 %call
}
%1 = bitcast %class.F* %agg.tmp1 to i8*
call void @llvm.lifetime.start.p0i8(i64 96, i8* nonnull %1)
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 nonnull %1, i8* align 8 nonnull %0, i64 96, i1 false)
- call void @_Z3fn11F(%class.F* byval nonnull align 8 %XLValue)
+ call void @_Z3fn11F(%class.F* byval(%class.F) nonnull align 8 %XLValue)
%call.i = call signext i32 @_ZN1F11isGlobalRegEv(%class.F* nonnull %agg.tmp1)
call void @llvm.lifetime.end.p0i8(i64 96, i8* nonnull %1)
call void @llvm.lifetime.end.p0i8(i64 96, i8* nonnull %0)
%0 = type { double, double }
-define void @maybe_an_fma(%0* sret %agg.result, %0* byval %a, %0* byval %b, %0* byval %c) nounwind {
+define void @maybe_an_fma(%0* sret %agg.result, %0* byval(%0) %a, %0* byval(%0) %b, %0* byval(%0) %c) nounwind {
entry:
%a.realp = getelementptr inbounds %0, %0* %a, i32 0, i32 0
%a.real = load double, double* %a.realp
; CHECK: ld 3, -[[OFFSET1]](1)
; CHECK: ld 3, -[[OFFSET2]](1)
-define i8* @func2({ i64, i8* } %array1, %tarray* byval %array2) {
+define i8* @func2({ i64, i8* } %array1, %tarray* byval(%tarray) %array2) {
entry:
%array1_ptr = extractvalue {i64, i8* } %array1, 1
%tmp = getelementptr inbounds %tarray, %tarray* %array2, i32 0, i32 1
; CHECK: ld 3, -[[OFFSET1]](1)
; CHECK: ld 3, -[[OFFSET2]](1)
-define i8* @func3({ i64, i8* }* byval %array1, %tarray* byval %array2) {
+define i8* @func3({ i64, i8* }* byval({ i64, i8* }) %array1, %tarray* byval(%tarray) %array2) {
entry:
%tmp1 = getelementptr inbounds { i64, i8* }, { i64, i8* }* %array1, i32 0, i32 1
%array1_ptr = load i8*, i8** %tmp1
define i8* @func4(i64 %p1, i64 %p2, i64 %p3, i64 %p4,
i64 %p5, i64 %p6, i64 %p7, i64 %p8,
- { i64, i8* } %array1, %tarray* byval %array2) {
+ { i64, i8* } %array1, %tarray* byval(%tarray) %array2) {
entry:
%array1_ptr = extractvalue {i64, i8* } %array1, 1
%tmp = getelementptr inbounds %tarray, %tarray* %array2, i32 0, i32 1
%struct.anon = type { i32, i32 }
declare void @foo(%struct.anon* %v)
-define void @test(i32 %a, i32 %b, %struct.anon* byval nocapture %v) {
+define void @test(i32 %a, i32 %b, %struct.anon* byval(%struct.anon) nocapture %v) {
entry:
call void @foo(%struct.anon* %v)
ret void
; RUN: llc -verify-machineinstrs -mcpu=ppc64 -ppc-asm-full-reg-names < %s | FileCheck %s
target datalayout = "E-m:o-p:32:32-f64:32:64-n32"
target triple = "powerpc-unknown-linux-gnu"
-
+
%struct.sm = type { i8, i8 }
-
+
; Function Attrs: nounwind ssp
-define void @foo(%struct.sm* byval %s) #0 {
+define void @foo(%struct.sm* byval(%struct.sm) %s) #0 {
entry:
%a = getelementptr inbounds %struct.sm, %struct.sm* %s, i32 0, i32 0
%0 = load i8, i8* %a, align 1
%add = add nuw nsw i32 %conv2, 3
%conv1 = trunc i32 %add to i8
store i8 %conv1, i8* %a, align 1
- call void @bar(%struct.sm* byval %s, %struct.sm* byval %s) #1
+ call void @bar(%struct.sm* byval(%struct.sm) %s, %struct.sm* byval(%struct.sm) %s) #1
ret void
}
; CHECK: lhz r4, [[OFF]]({{r[3?1]}})
; CHECK: bl bar
; CHECK: blr
-
-declare void @bar(%struct.sm* byval, %struct.sm* byval)
-
+
+declare void @bar(%struct.sm* byval(%struct.sm), %struct.sm* byval(%struct.sm))
+
attributes #0 = { nounwind ssp }
attributes #1 = { nounwind }
-
+
declare void @bar(i32*, i32*) #0
-define void @goo(%struct.s* byval nocapture readonly %a, i32 signext %n) #0 {
+define void @goo(%struct.s* byval(%struct.s) nocapture readonly %a, i32 signext %n) #0 {
entry:
%0 = zext i32 %n to i64
%vla = alloca i32, i64 %0, align 128
; This tests correct handling of empty aggregate parameters and return values.
; An empty parameter passed by value does not consume a protocol register or
; a parameter save area doubleword. An empty parameter passed by reference
-; is treated as any other pointer parameter. An empty aggregate return value
-; is treated as any other aggregate return value, passed via address as a
+; is treated as any other pointer parameter. An empty aggregate return value
+; is treated as any other aggregate return value, passed via address as a
; hidden parameter in GPR3. In this example, GPR3 contains the return value
; address, GPR4 contains the address of e2, and e1 and e3 are not passed or
; received.
%struct.empty = type {}
-define void @callee(%struct.empty* noalias sret %agg.result, %struct.empty* byval %a1, %struct.empty* %a2, %struct.empty* byval %a3) nounwind {
+define void @callee(%struct.empty* noalias sret %agg.result, %struct.empty* byval(%struct.empty) %a1, %struct.empty* %a2, %struct.empty* byval(%struct.empty) %a3) nounwind {
entry:
%a2.addr = alloca %struct.empty*, align 8
store %struct.empty* %a2, %struct.empty** %a2.addr, align 8
%e1 = alloca %struct.empty, align 1
%e2 = alloca %struct.empty, align 1
%e3 = alloca %struct.empty, align 1
- call void @callee(%struct.empty* sret %agg.result, %struct.empty* byval %e1, %struct.empty* %e2, %struct.empty* byval %e3)
+ call void @callee(%struct.empty* sret %agg.result, %struct.empty* byval(%struct.empty) %e1, %struct.empty* %e2, %struct.empty* byval(%struct.empty) %e3)
ret void
}
; Since we can only pass a max of 8 float128 value in VSX registers, ensure we
; store to stack if passing more.
; Function Attrs: norecurse nounwind readonly
-define fp128 @testStruct_03(%struct.With9fp128params* byval nocapture readonly align 16 %a) {
+define fp128 @testStruct_03(%struct.With9fp128params* byval(%struct.With9fp128params) nocapture readonly align 16 %a) {
; CHECK-LABEL: testStruct_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 128(r1)
; Function Attrs: norecurse nounwind readonly
-define fp128 @testNestedAggregate(%struct.MixedC* byval nocapture readonly align 16 %a) {
+define fp128 @testNestedAggregate(%struct.MixedC* byval(%struct.MixedC) nocapture readonly align 16 %a) {
; CHECK-LABEL: testNestedAggregate:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: std r8, 72(r1)
define internal fastcc void @CallPassByValue(%"myClass::Mem"* %E) align 2 {
entry:
- call fastcc void @PassByValue(%"myClass::Mem"* byval nonnull align 8 undef);
+ call fastcc void @PassByValue(%"myClass::Mem"* byval(%"myClass::Mem") nonnull align 8 undef);
ret void
; CHECK-LABEL: PassByValue
}
declare dso_local fastcc void
- @PassByValue(%"myClass::Mem"* byval nocapture readonly align 8) align 2
+ @PassByValue(%"myClass::Mem"* byval(%"myClass::Mem") nocapture readonly align 8) align 2
; Verify Paramater Save Area is allocated if parameter exceed the number that
; can be passed via registers
define internal fastcc void @AggMemExprEmitter(%"myClass::MemK"* %E) align 2 {
entry:
- call fastcc void @MemExprEmitterInitialization(%"myClass::MemK" *
- byval nonnull align 8 undef);
+ call fastcc void @MemExprEmitterInitialization(%"myClass::MemK"*
+ byval(%"myClass::MemK") nonnull align 8 undef);
ret void
; CHECK-LABEL: AggMemExprEmitter
}
declare dso_local fastcc void
- @MemExprEmitterInitialization(%"myClass::MemK" *
- byval nocapture readonly align 8) align 2
+ @MemExprEmitterInitialization(%"myClass::MemK"*
+ byval(%"myClass::MemK") nocapture readonly align 8) align 2
%__exception_ = getelementptr inbounds %"class.std::__1::__assoc_sub_state", %"class.std::__1::__assoc_sub_state"* %this, i64 0, i32 1
%0 = bitcast { i64, i64 }* %tmp to i8*
call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 16, i1 false)
- call void @_ZNSt15__exception_ptr13exception_ptrC1EMS0_FvvE(%"class.std::__exception_ptr::exception_ptr"* %ref.tmp, { i64, i64 }* byval %tmp) #5
+ call void @_ZNSt15__exception_ptr13exception_ptrC1EMS0_FvvE(%"class.std::__exception_ptr::exception_ptr"* %ref.tmp, { i64, i64 }* byval({ i64, i64 }) %tmp) #5
%call = call zeroext i1 @_ZNSt15__exception_ptrneERKNS_13exception_ptrES2_(%"class.std::__exception_ptr::exception_ptr"* %__exception_, %"class.std::__exception_ptr::exception_ptr"* %ref.tmp) #5
call void @_ZNSt15__exception_ptr13exception_ptrD1Ev(%"class.std::__exception_ptr::exception_ptr"* %ref.tmp) #5
br i1 %call, label %if.then, label %if.end
declare zeroext i1 @_ZNSt15__exception_ptrneERKNS_13exception_ptrES2_(%"class.std::__exception_ptr::exception_ptr"*, %"class.std::__exception_ptr::exception_ptr"*) #1
; Function Attrs: nounwind optsize
-declare void @_ZNSt15__exception_ptr13exception_ptrC1EMS0_FvvE(%"class.std::__exception_ptr::exception_ptr"*, { i64, i64 }* byval) #1
+declare void @_ZNSt15__exception_ptr13exception_ptrC1EMS0_FvvE(%"class.std::__exception_ptr::exception_ptr"*, { i64, i64 }* byval({ i64, i64 })) #1
; Function Attrs: nounwind optsize
declare void @_ZNSt15__exception_ptr13exception_ptrD1Ev(%"class.std::__exception_ptr::exception_ptr"*) #1
%struct.S6 = type { [6 x i8] }
%struct.S7 = type { [7 x i8] }
-define void @test(%struct.S3* byval %s3, %struct.S5* byval %s5, %struct.S6* byval %s6, %struct.S7* byval %s7) nounwind {
+define void @test(%struct.S3* byval(%struct.S3) %s3, %struct.S5* byval(%struct.S5) %s5, %struct.S6* byval(%struct.S6) %s6, %struct.S7* byval(%struct.S7) %s7) nounwind {
entry:
- call void @check(%struct.S3* byval %s3, %struct.S5* byval %s5, %struct.S6* byval %s6, %struct.S7* byval %s7)
+ call void @check(%struct.S3* byval(%struct.S3) %s3, %struct.S5* byval(%struct.S5) %s5, %struct.S6* byval(%struct.S6) %s6, %struct.S7* byval(%struct.S7) %s7)
ret void
}
; CHECK-DAG: ld 4, 56(1)
; CHECK-DAG: ld 3, 48(1)
-declare void @check(%struct.S3* byval, %struct.S5* byval, %struct.S6* byval, %struct.S7* byval)
+declare void @check(%struct.S3* byval(%struct.S3), %struct.S5* byval(%struct.S5), %struct.S6* byval(%struct.S6), %struct.S7* byval(%struct.S7))
%0 = type { double, double }
-define void @maybe_an_fma(%0* sret %agg.result, %0* byval %a, %0* byval %b, %0* byval %c) nounwind {
+define void @maybe_an_fma(%0* sret %agg.result, %0* byval(%0) %a, %0* byval(%0) %b, %0* byval(%0) %c) nounwind {
entry:
%a.realp = getelementptr inbounds %0, %0* %a, i32 0, i32 0
%a.real = load double, double* %a.realp
; value. Since the target does bitcast through memory and we no longer
; remember the address we need to do the store in a fresh local
; address.
-define ppc_fp128 @test(%struct.S* byval %x) nounwind {
+define ppc_fp128 @test(%struct.S* byval(%struct.S) %x) nounwind {
; CHECK-LABEL: test:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: std 5, -16(1)
@gt = common global %struct.test zeroinitializer, align 16
@gp = common global %struct.pad zeroinitializer, align 8
-define signext i32 @callee1(i32 signext %x, %struct.test* byval align 16 nocapture readnone %y, i32 signext %z) {
+define signext i32 @callee1(i32 signext %x, %struct.test* byval(%struct.test) align 16 nocapture readnone %y, i32 signext %z) {
entry:
ret i32 %z
}
; CHECK: mr 3, 7
; CHECK: blr
-declare signext i32 @test1(i32 signext, %struct.test* byval align 16, i32 signext)
+declare signext i32 @test1(i32 signext, %struct.test* byval(%struct.test) align 16, i32 signext)
define void @caller1(i32 signext %z) {
entry:
- %call = tail call signext i32 @test1(i32 signext 0, %struct.test* byval align 16 @gt, i32 signext %z)
+ %call = tail call signext i32 @test1(i32 signext 0, %struct.test* byval(%struct.test) align 16 @gt, i32 signext %z)
ret void
}
; CHECK-LABEL: @caller1
; CHECK: mr 7, 3
; CHECK: bl test1
-define i64 @callee2(%struct.pad* byval nocapture readnone %x, i32 signext %y, %struct.test* byval align 16 nocapture readonly %z) {
+define i64 @callee2(%struct.pad* byval(%struct.pad) nocapture readnone %x, i32 signext %y, %struct.test* byval(%struct.test) align 16 nocapture readonly %z) {
entry:
%x1 = getelementptr inbounds %struct.test, %struct.test* %z, i64 0, i32 0
%0 = load i64, i64* %x1, align 16
; CHECK: ld {{[0-9]+}}, 128(1)
; CHECK: blr
-declare i64 @test2(%struct.pad* byval, i32 signext, %struct.test* byval align 16)
+declare i64 @test2(%struct.pad* byval(%struct.pad), i32 signext, %struct.test* byval(%struct.test) align 16)
define void @caller2(i64 %z) {
entry:
%tmp = alloca %struct.test, align 16
%.compoundliteral.sroa.0.0..sroa_idx = getelementptr inbounds %struct.test, %struct.test* %tmp, i64 0, i32 0
store i64 %z, i64* %.compoundliteral.sroa.0.0..sroa_idx, align 16
- %call = call i64 @test2(%struct.pad* byval @gp, i32 signext 0, %struct.test* byval align 16 %tmp)
+ %call = call i64 @test2(%struct.pad* byval(%struct.pad) @gp, i32 signext 0, %struct.test* byval(%struct.test) align 16 %tmp)
ret void
}
; CHECK-LABEL: @caller2
%struct.pos_T = type { i64 }
; check that we're not copying stuff between R and X registers
-define internal void @serialize_pos(%struct.pos_T* byval %pos, %struct.__sFILE* %fp) nounwind {
+define internal void @serialize_pos(%struct.pos_T* byval(%struct.pos_T) %pos, %struct.__sFILE* %fp) nounwind {
entry:
ret void
}
%struct.byvalTest = type { [8 x i8] }
@byval = common global %struct.byvalTest zeroinitializer
-define void @byval_callee(%struct.byvalTest* byval %ptr) { ret void }
+define void @byval_callee(%struct.byvalTest* byval(%struct.byvalTest) %ptr) { ret void }
define void @byval_caller() {
- tail call void @byval_callee(%struct.byvalTest* byval @byval)
+ tail call void @byval_callee(%struct.byvalTest* byval(%struct.byvalTest) @byval)
ret void
; CHECK-SCO-LABEL: bl byval_callee
@gs = common global %struct.small_arg zeroinitializer, align 2
@gf = common global float 0.000000e+00, align 4
-define void @callee1(%struct.small_arg* noalias nocapture sret %agg.result, %struct.large_arg* byval nocapture readnone %pad, %struct.small_arg* byval nocapture readonly %x) {
+define void @callee1(%struct.small_arg* noalias nocapture sret(%struct.small_arg) %agg.result, %struct.large_arg* byval(%struct.large_arg) nocapture readnone %pad, %struct.small_arg* byval(%struct.small_arg) nocapture readonly %x) {
entry:
%0 = bitcast %struct.small_arg* %x to i32*
%1 = bitcast %struct.small_arg* %agg.result to i32*
define void @caller1() {
entry:
%tmp = alloca %struct.small_arg, align 2
- call void @test1(%struct.small_arg* sret %tmp, %struct.large_arg* byval @gl, %struct.small_arg* byval @gs)
+ call void @test1(%struct.small_arg* sret %tmp, %struct.large_arg* byval(%struct.large_arg) @gl, %struct.small_arg* byval(%struct.small_arg) @gs)
ret void
}
; CHECK: @caller1
; CHECK: stw {{[0-9]+}}, 124(1)
; CHECK: bl test1
-declare void @test1(%struct.small_arg* sret, %struct.large_arg* byval, %struct.small_arg* byval)
+declare void @test1(%struct.small_arg* sret, %struct.large_arg* byval(%struct.large_arg), %struct.small_arg* byval(%struct.small_arg))
define float @callee2(float %pad1, float %pad2, float %pad3, float %pad4, float %pad5, float %pad6, float %pad7, float %pad8, float %pad9, float %pad10, float %pad11, float %pad12, float %pad13, float %x) {
entry:
@gs = common global %struct.small_arg zeroinitializer, align 2
@gf = common global float 0.000000e+00, align 4
-define void @callee1(%struct.small_arg* noalias nocapture sret %agg.result, %struct.large_arg* byval nocapture readnone %pad, %struct.small_arg* byval nocapture readonly %x) {
+define void @callee1(%struct.small_arg* noalias nocapture sret %agg.result, %struct.large_arg* byval(%struct.large_arg) nocapture readnone %pad, %struct.small_arg* byval(%struct.small_arg) nocapture readonly %x) {
entry:
%0 = bitcast %struct.small_arg* %x to i32*
%1 = bitcast %struct.small_arg* %agg.result to i32*
define void @caller1() {
entry:
%tmp = alloca %struct.small_arg, align 2
- call void @test1(%struct.small_arg* sret %tmp, %struct.large_arg* byval @gl, %struct.small_arg* byval @gs)
+ call void @test1(%struct.small_arg* sret %tmp, %struct.large_arg* byval(%struct.large_arg) @gl, %struct.small_arg* byval(%struct.small_arg) @gs)
ret void
}
; CHECK: @caller1
; CHECK: stw {{[0-9]+}}, 104(1)
; CHECK: bl test1
-declare void @test1(%struct.small_arg* sret, %struct.large_arg* byval, %struct.small_arg* byval)
+declare void @test1(%struct.small_arg* sret, %struct.large_arg* byval(%struct.large_arg), %struct.small_arg* byval(%struct.small_arg))
define float @callee2(float %pad1, float %pad2, float %pad3, float %pad4, float %pad5, float %pad6, float %pad7, float %pad8, float %pad9, float %pad10, float %pad11, float %pad12, float %pad13, float %x) {
entry:
ret float %x
}
; CHECK: @callee2
-; CHECK: lfs {{[0-9]+}}, 136(1)
+; CHECK: lfs {{[0-9]+}}, 136(1)
; CHECK: blr
define void @caller2() {
%struct.foo = type { i8, i8 }
-define void @_Z5check3foos(%struct.foo* nocapture byval %f, i16 signext %i) noinline {
+define void @_Z5check3foos(%struct.foo* nocapture byval(%struct.foo) %f, i16 signext %i) noinline {
; CHECK-LABEL: _Z5check3foos:
; CHECK: sth 3, {{[0-9]+}}(1)
; CHECK: lha {{[0-9]+}}, {{[0-9]+}}(1)
; Function Attrs: nounwind readonly
define signext i32 @main() #0 {
entry:
- %call = tail call fastcc signext i32 @func_90(%struct.S1* byval bitcast ({ i8, i8, i8, i8, i8, i8, i8, i8 }* @main.l_1554 to %struct.S1*))
+ %call = tail call fastcc signext i32 @func_90(%struct.S1* byval(%struct.S1) bitcast ({ i8, i8, i8, i8, i8, i8, i8, i8 }* @main.l_1554 to %struct.S1*))
; CHECK-NOT: ld {{[0-9]+}}, main.l_1554@toc@l
ret i32 %call
}
; Function Attrs: nounwind readonly
-define internal fastcc signext i32 @func_90(%struct.S1* byval nocapture %p_91) #0 {
+define internal fastcc signext i32 @func_90(%struct.S1* byval(%struct.S1) nocapture %p_91) #0 {
entry:
%0 = bitcast %struct.S1* %p_91 to i64*
%bf.load = load i64, i64* %0, align 1
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %62, i8* align 16 bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false)
%63 = bitcast %struct.S1998* %agg.tmp112 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %63, i8* align 16 bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i1 false)
- call void @check1998(%struct.S1998* sret %agg.tmp, %struct.S1998* byval align 16 %agg.tmp111, %struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 1), %struct.S1998* byval align 16 %agg.tmp112)
- call void @checkx1998(%struct.S1998* byval align 16 %agg.tmp)
+ call void @check1998(%struct.S1998* sret %agg.tmp, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp111, %struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 1), %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp112)
+ call void @checkx1998(%struct.S1998* byval(%struct.S1998) align 16 %agg.tmp)
%64 = bitcast %struct.S1998* %agg.tmp113 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %64, i8* align 16 bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false)
%65 = bitcast %struct.S1998* %agg.tmp114 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %65, i8* align 16 bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i1 false)
%66 = bitcast %struct.S1998* %agg.tmp115 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %66, i8* align 16 bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i1 false)
- call void (i32, ...) @check1998va(i32 signext 1, double 1.000000e+00, %struct.S1998* byval align 16 %agg.tmp113, i64 2, %struct.S1998* byval align 16 %agg.tmp114, %struct.S1998* byval align 16 %agg.tmp115)
+ call void (i32, ...) @check1998va(i32 signext 1, double 1.000000e+00, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp113, i64 2, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp114, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp115)
%67 = bitcast %struct.S1998* %agg.tmp116 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %67, i8* align 16 bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false)
%68 = bitcast %struct.S1998* %agg.tmp117 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %69, i8* align 16 bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i1 false)
%70 = bitcast %struct.S1998* %agg.tmp119 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %70, i8* align 16 bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false)
- call void (i32, ...) @check1998va(i32 signext 2, %struct.S1998* byval align 16 %agg.tmp116, %struct.S1998* byval align 16 %agg.tmp117, ppc_fp128 0xM40000000000000000000000000000000, %struct.S1998* byval align 16 %agg.tmp118, %struct.S1998* byval align 16 %agg.tmp119)
+ call void (i32, ...) @check1998va(i32 signext 2, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp116, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp117, ppc_fp128 0xM40000000000000000000000000000000, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp118, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp119)
ret void
}
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1)
-declare void @check1998(%struct.S1998* sret, %struct.S1998* byval align 16, %struct.S1998*, %struct.S1998* byval align 16)
+declare void @check1998(%struct.S1998* sret, %struct.S1998* byval(%struct.S1998) align 16, %struct.S1998*, %struct.S1998* byval(%struct.S1998) align 16)
declare void @check1998va(i32 signext, ...)
-declare void @checkx1998(%struct.S1998* byval align 16 %arg)
+declare void @checkx1998(%struct.S1998* byval(%struct.S1998) align 16 %arg)
@s2760 = external global %struct.S2760
@fails = external global i32
-define void @check2760(%struct.S2760* noalias sret %agg.result, %struct.S2760* byval align 16, %struct.S2760* %arg1, %struct.S2760* byval align 16) {
+define void @check2760(%struct.S2760* noalias sret %agg.result, %struct.S2760* byval(%struct.S2760) align 16, %struct.S2760* %arg1, %struct.S2760* byval(%struct.S2760) align 16) {
entry:
%arg0 = alloca %struct.S2760, align 32
%arg2 = alloca %struct.S2760, align 32
@barbaz = external global i32
-define void @goo(%struct.s* byval nocapture readonly %a) {
+define void @goo(%struct.s* byval(%struct.s) nocapture readonly %a) {
entry:
%x = alloca [2 x i32], align 32
%a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
; CHECK-32-PIC: addic 29, 0, 12
; The large-frame-size case.
-define void @hoo(%struct.s* byval nocapture readonly %a) {
+define void @hoo(%struct.s* byval(%struct.s) nocapture readonly %a) {
entry:
%x = alloca [200000 x i32], align 32
%a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
; Make sure that the FP save area is still allocated correctly relative to
; where r30 is saved.
-define void @loo(%struct.s* byval nocapture readonly %a) {
+define void @loo(%struct.s* byval(%struct.s) nocapture readonly %a) {
entry:
%x = alloca [2 x i32], align 32
%a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %5, i8* align 4 bitcast ({ i32, i16, [2 x i8] }* @caller1.p6 to i8*), i64 8, i1 false)
%6 = bitcast %struct.s7* %p7 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %6, i8* align 4 bitcast ({ i32, i16, i8, i8 }* @caller1.p7 to i8*), i64 8, i1 false)
- %call = call i32 @callee1(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, %struct.s1* byval %p1, %struct.s2* byval %p2, %struct.s3* byval %p3, %struct.s4* byval %p4, %struct.s5* byval %p5, %struct.s6* byval %p6, %struct.s7* byval %p7)
+ %call = call i32 @callee1(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, %struct.s1* byval(%struct.s1) %p1, %struct.s2* byval(%struct.s2) %p2, %struct.s3* byval(%struct.s3) %p3, %struct.s4* byval(%struct.s4) %p4, %struct.s5* byval(%struct.s5) %p5, %struct.s6* byval(%struct.s6) %p6, %struct.s7* byval(%struct.s7) %p7)
ret i32 %call
; CHECK: stb {{[0-9]+}}, 119(1)
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
-define internal i32 @callee1(i32 %z1, i32 %z2, i32 %z3, i32 %z4, i32 %z5, i32 %z6, i32 %z7, i32 %z8, %struct.s1* byval %v1, %struct.s2* byval %v2, %struct.s3* byval %v3, %struct.s4* byval %v4, %struct.s5* byval %v5, %struct.s6* byval %v6, %struct.s7* byval %v7) nounwind {
+define internal i32 @callee1(i32 %z1, i32 %z2, i32 %z3, i32 %z4, i32 %z5, i32 %z6, i32 %z7, i32 %z8, %struct.s1* byval(%struct.s1) %v1, %struct.s2* byval(%struct.s2) %v2, %struct.s3* byval(%struct.s3) %v3, %struct.s4* byval(%struct.s4) %v4, %struct.s5* byval(%struct.s5) %v5, %struct.s6* byval(%struct.s6) %v6, %struct.s7* byval(%struct.s7) %v7) nounwind {
entry:
%z1.addr = alloca i32, align 4
%z2.addr = alloca i32, align 4
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast (%struct.t6* @caller2.p6 to i8*), i64 6, i1 false)
%6 = bitcast %struct.t7* %p7 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast (%struct.t7* @caller2.p7 to i8*), i64 7, i1 false)
- %call = call i32 @callee2(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, %struct.t1* byval %p1, %struct.t2* byval %p2, %struct.t3* byval %p3, %struct.t4* byval %p4, %struct.t5* byval %p5, %struct.t6* byval %p6, %struct.t7* byval %p7)
+ %call = call i32 @callee2(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, %struct.t1* byval(%struct.t1) %p1, %struct.t2* byval(%struct.t2) %p2, %struct.t3* byval(%struct.t3) %p3, %struct.t4* byval(%struct.t4) %p4, %struct.t5* byval(%struct.t5) %p5, %struct.t6* byval(%struct.t6) %p6, %struct.t7* byval(%struct.t7) %p7)
ret i32 %call
; CHECK: stb {{[0-9]+}}, 119(1)
; CHECK: stw {{[0-9]+}}, 161(1)
}
-define internal i32 @callee2(i32 %z1, i32 %z2, i32 %z3, i32 %z4, i32 %z5, i32 %z6, i32 %z7, i32 %z8, %struct.t1* byval %v1, %struct.t2* byval %v2, %struct.t3* byval %v3, %struct.t4* byval %v4, %struct.t5* byval %v5, %struct.t6* byval %v6, %struct.t7* byval %v7) nounwind {
+define internal i32 @callee2(i32 %z1, i32 %z2, i32 %z3, i32 %z4, i32 %z5, i32 %z6, i32 %z7, i32 %z8, %struct.t1* byval(%struct.t1) %v1, %struct.t2* byval(%struct.t2) %v2, %struct.t3* byval(%struct.t3) %v3, %struct.t4* byval(%struct.t4) %v4, %struct.t5* byval(%struct.t5) %v5, %struct.t6* byval(%struct.t6) %v6, %struct.t7* byval(%struct.t7) %v7) nounwind {
entry:
%z1.addr = alloca i32, align 4
%z2.addr = alloca i32, align 4
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %5, i8* align 4 bitcast ({ i32, i16, [2 x i8] }* @caller1.p6 to i8*), i64 8, i1 false)
%6 = bitcast %struct.s7* %p7 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %6, i8* align 4 bitcast ({ i32, i16, i8, i8 }* @caller1.p7 to i8*), i64 8, i1 false)
- %call = call i32 @callee1(%struct.s1* byval %p1, %struct.s2* byval %p2, %struct.s3* byval %p3, %struct.s4* byval %p4, %struct.s5* byval %p5, %struct.s6* byval %p6, %struct.s7* byval %p7)
+ %call = call i32 @callee1(%struct.s1* byval(%struct.s1) %p1, %struct.s2* byval(%struct.s2) %p2, %struct.s3* byval(%struct.s3) %p3, %struct.s4* byval(%struct.s4) %p4, %struct.s5* byval(%struct.s5) %p5, %struct.s6* byval(%struct.s6) %p6, %struct.s7* byval(%struct.s7) %p7)
ret i32 %call
; CHECK-LABEL: caller1
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
-define internal i32 @callee1(%struct.s1* byval %v1, %struct.s2* byval %v2, %struct.s3* byval %v3, %struct.s4* byval %v4, %struct.s5* byval %v5, %struct.s6* byval %v6, %struct.s7* byval %v7) nounwind {
+define internal i32 @callee1(%struct.s1* byval(%struct.s1) %v1, %struct.s2* byval(%struct.s2) %v2, %struct.s3* byval(%struct.s3) %v3, %struct.s4* byval(%struct.s4) %v4, %struct.s5* byval(%struct.s5) %v5, %struct.s6* byval(%struct.s6) %v6, %struct.s7* byval(%struct.s7) %v7) nounwind {
entry:
%a = getelementptr inbounds %struct.s1, %struct.s1* %v1, i32 0, i32 0
%0 = load i8, i8* %a, align 1
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast (%struct.t6* @caller2.p6 to i8*), i64 6, i1 false)
%6 = bitcast %struct.t7* %p7 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast (%struct.t7* @caller2.p7 to i8*), i64 7, i1 false)
- %call = call i32 @callee2(%struct.t1* byval %p1, %struct.t2* byval %p2, %struct.t3* byval %p3, %struct.t4* byval %p4, %struct.t5* byval %p5, %struct.t6* byval %p6, %struct.t7* byval %p7)
+ %call = call i32 @callee2(%struct.t1* byval(%struct.t1) %p1, %struct.t2* byval(%struct.t2) %p2, %struct.t3* byval(%struct.t3) %p3, %struct.t4* byval(%struct.t4) %p4, %struct.t5* byval(%struct.t5) %p5, %struct.t6* byval(%struct.t6) %p6, %struct.t7* byval(%struct.t7) %p7)
ret i32 %call
; CHECK-LABEL: caller2
; CHECK: lbz 3, 160(31)
}
-define internal i32 @callee2(%struct.t1* byval %v1, %struct.t2* byval %v2, %struct.t3* byval %v3, %struct.t4* byval %v4, %struct.t5* byval %v5, %struct.t6* byval %v6, %struct.t7* byval %v7) nounwind {
+define internal i32 @callee2(%struct.t1* byval(%struct.t1) %v1, %struct.t2* byval(%struct.t2) %v2, %struct.t3* byval(%struct.t3) %v3, %struct.t4* byval(%struct.t4) %v4, %struct.t5* byval(%struct.t5) %v5, %struct.t6* byval(%struct.t6) %v6, %struct.t7* byval(%struct.t7) %v7) nounwind {
entry:
%a = getelementptr inbounds %struct.t1, %struct.t1* %v1, i32 0, i32 0
%0 = load i8, i8* %a, align 1
}
; Function Attrs: nounwind
-define void @test2(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, %struct.s2* byval nocapture readonly %vs) #0 {
+define void @test2(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, %struct.s2* byval(%struct.s2) nocapture readonly %vs) #0 {
entry:
%m = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 0
%0 = load i64, i64* %m, align 8
}
; Function Attrs: nounwind
-define void @test3(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, i64 %d9, %struct.s2* byval nocapture readonly %vs) #0 {
+define void @test3(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, i64 %d9, %struct.s2* byval(%struct.s2) nocapture readonly %vs) #0 {
entry:
%m = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 0
%0 = load i64, i64* %m, align 8
%struct.Foo = type { i32, i32, i32, i16, i8 }
@foo = global %struct.Foo { i32 1, i32 2, i32 3, i16 4, i8 5 }, align 4
-define i32 @callee(%struct.Foo* byval %f) nounwind {
+define i32 @callee(%struct.Foo* byval(%struct.Foo) %f) nounwind {
; RV32I-LABEL: callee:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: lw a0, 0(a0)
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
entry:
- %call = call i32 @callee(%struct.Foo* byval @foo)
+ %call = call i32 @callee(%struct.Foo* byval(%struct.Foo) @foo)
ret void
}
%struct.large = type { i32, i32, i32, i32 }
-define i32 @callee_large_struct(%struct.large* byval align 4 %a) nounwind {
+define i32 @callee_large_struct(%struct.large* byval(%struct.large) align 4 %a) nounwind {
; RV32I-FPELIM-LABEL: callee_large_struct:
; RV32I-FPELIM: # %bb.0:
; RV32I-FPELIM-NEXT: lw a1, 0(a0)
store i32 3, i32* %c
%d = getelementptr inbounds %struct.large, %struct.large* %ls, i32 0, i32 3
store i32 4, i32* %d
- %2 = call i32 @callee_large_struct(%struct.large* byval align 4 %ls)
+ %2 = call i32 @callee_large_struct(%struct.large* byval(%struct.large) align 4 %ls)
ret i32 %2
}
%struct.large = type { i64, i64, i64, i64 }
-define i64 @callee_large_struct(%struct.large* byval align 8 %a) nounwind {
+define i64 @callee_large_struct(%struct.large* byval(%struct.large) align 8 %a) nounwind {
; RV64I-LABEL: callee_large_struct:
; RV64I: # %bb.0:
; RV64I-NEXT: ld a1, 0(a0)
store i64 3, i64* %c
%d = getelementptr inbounds %struct.large, %struct.large* %ls, i64 0, i32 3
store i64 4, i64* %d
- %2 = call i64 @callee_large_struct(%struct.large* byval align 8 %ls)
+ %2 = call i64 @callee_large_struct(%struct.large* byval(%struct.large) align 8 %ls)
ret i64 %2
}
; Byval parameters hand the function a pointer directly into the stack area
; we want to reuse during a tail call. Do not tail call optimize functions with
; byval parameters.
-declare i32 @callee_byval(i32** byval %a)
+declare i32 @callee_byval(i32** byval(i32*) %a)
define i32 @caller_byval() nounwind {
; CHECK-LABEL: caller_byval
; CHECK-NOT: tail callee_byval
; CHECK: call callee_byval
entry:
%a = alloca i32*
- %r = tail call i32 @callee_byval(i32** byval %a)
+ %r = tail call i32 @callee_byval(i32** byval(i32*) %a)
ret i32 %r
}
;CHECK: st
;CHECK: st
;CHECK: bar
- %0 = tail call i32 @bar(%struct.foo_t* byval @s) nounwind
+ %0 = tail call i32 @bar(%struct.foo_t* byval(%struct.foo_t) @s) nounwind
ret i32 %0
}
-declare i32 @bar(%struct.foo_t* byval)
+declare i32 @bar(%struct.foo_t* byval(%struct.foo_t))
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NEXT: nop
-define double @test_1(double* byval %a, double* byval %b) {
+define double @test_1(double* byval(double) %a, double* byval(double) %b) {
entry:
%0 = load double, double* %a, align 8
%1 = load double, double* %b, align 8
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NEXT: nop
-define double @test_2(double* byval %a) {
+define double @test_2(double* byval(double) %a) {
entry:
%0 = load double, double* %a, align 8
%1 = call double @llvm.sqrt.f64(double %0) nounwind
; RUN: llc < %s -O1 -march=sparc -mcpu=leon4 | FileCheck %s -check-prefix=LEON3_4_ITIN
; NO_ITIN-LABEL: f32_ops:
-; NO_ITIN: ld
-; NO_ITIN-NEXT: ld
-; NO_ITIN-NEXT: ld
-; NO_ITIN-NEXT: ld
-; NO_ITIN-NEXT: fadds
-; NO_ITIN-NEXT: fsubs
-; NO_ITIN-NEXT: fmuls
-; NO_ITIN-NEXT: retl
-; NO_ITIN-NEXT: fdivs
+; NO_ITIN: ld
+; NO_ITIN-NEXT: ld
+; NO_ITIN-NEXT: ld
+; NO_ITIN-NEXT: ld
+; NO_ITIN-NEXT: fadds
+; NO_ITIN-NEXT: fsubs
+; NO_ITIN-NEXT: fmuls
+; NO_ITIN-NEXT: retl
+; NO_ITIN-NEXT: fdivs
; LEON2_ITIN-LABEL: f32_ops:
-; LEON2_ITIN: ld
-; LEON2_ITIN-NEXT: ld
-; LEON2_ITIN-NEXT: fadds
-; LEON2_ITIN-NEXT: ld
-; LEON2_ITIN-NEXT: fsubs
-; LEON2_ITIN-NEXT: ld
-; LEON2_ITIN-NEXT: fmuls
-; LEON2_ITIN-NEXT: retl
-; LEON2_ITIN-NEXT: fdivs
+; LEON2_ITIN: ld
+; LEON2_ITIN-NEXT: ld
+; LEON2_ITIN-NEXT: fadds
+; LEON2_ITIN-NEXT: ld
+; LEON2_ITIN-NEXT: fsubs
+; LEON2_ITIN-NEXT: ld
+; LEON2_ITIN-NEXT: fmuls
+; LEON2_ITIN-NEXT: retl
+; LEON2_ITIN-NEXT: fdivs
; LEON3_4_ITIN-LABEL: f32_ops:
-; LEON3_4_ITIN: ld
-; LEON3_4_ITIN-NEXT: ld
-; LEON3_4_ITIN-NEXT: ld
-; LEON3_4_ITIN-NEXT: fadds
-; LEON3_4_ITIN-NEXT: ld
-; LEON3_4_ITIN-NEXT: fsubs
-; LEON3_4_ITIN-NEXT: fmuls
-; LEON3_4_ITIN-NEXT: retl
-; LEON3_4_ITIN-NEXT: fdivs
+; LEON3_4_ITIN: ld
+; LEON3_4_ITIN-NEXT: ld
+; LEON3_4_ITIN-NEXT: ld
+; LEON3_4_ITIN-NEXT: fadds
+; LEON3_4_ITIN-NEXT: ld
+; LEON3_4_ITIN-NEXT: fsubs
+; LEON3_4_ITIN-NEXT: fmuls
+; LEON3_4_ITIN-NEXT: retl
+; LEON3_4_ITIN-NEXT: fdivs
-define float @f32_ops(float* byval %a, float* byval %b, float* byval %c, float* byval %d) {
+define float @f32_ops(float* byval(float) %a, float* byval(float) %b, float* byval(float) %c, float* byval(float) %d) {
entry:
%0 = load float, float* %a, align 8
%1 = load float, float* %b, align 8
; CHECK: std
; CHECK: std
-define void @f128_ops(fp128* noalias sret %scalar.result, fp128* byval %a, fp128* byval %b, fp128* byval %c, fp128* byval %d) {
+define void @f128_ops(fp128* noalias sret %scalar.result, fp128* byval(fp128) %a, fp128* byval(fp128) %b, fp128* byval(fp128) %c, fp128* byval(fp128) %d) {
entry:
%0 = load fp128, fp128* %a, align 8
%1 = load fp128, fp128* %b, align 8
; CHECK-DAG: ldd [%[[S1]]], %f{{.+}}
; CHECK: jmp {{%[oi]7}}+12
-define void @f128_spill(fp128* noalias sret %scalar.result, fp128* byval %a) {
+define void @f128_spill(fp128* noalias sret %scalar.result, fp128* byval(fp128) %a) {
entry:
%0 = load fp128, fp128* %a, align 8
call void asm sideeffect "", "~{f0},~{f1},~{f2},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31}"()
; CHECK-NEXT: add %g1, %sp, %g1
; CHECK-NEXT: ldd [%g1+8], %f{{.+}}
-define void @f128_spill_large(<251 x fp128>* noalias sret %scalar.result, <251 x fp128>* byval %a) {
+define void @f128_spill_large(<251 x fp128>* noalias sret %scalar.result, <251 x fp128>* byval(<251 x fp128>) %a) {
entry:
%0 = load <251 x fp128>, <251 x fp128>* %a, align 8
call void asm sideeffect "", "~{f0},~{f1},~{f2},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31}"()
; HARD-NEXT: nop
; SOFT: _Q_cmp
-define i32 @f128_compare(fp128* byval %f0, fp128* byval %f1, i32 %a, i32 %b) {
+define i32 @f128_compare(fp128* byval(fp128) %f0, fp128* byval(fp128) %f1, i32 %a, i32 %b) {
entry:
%0 = load fp128, fp128* %f0, align 8
%1 = load fp128, fp128* %f1, align 8
; SOFT: _Q_cmp
; SOFT: cmp
-define i32 @f128_compare2(fp128* byval %f0) {
+define i32 @f128_compare2(fp128* byval(fp128) %f0) {
entry:
%0 = load fp128, fp128* %f0, align 8
%1 = fcmp ogt fp128 %0, 0xL00000000000000000000000000000000
; BE: fabss %f0, %f0
; EL: fabss %f3, %f3
-define void @f128_abs(fp128* noalias sret %scalar.result, fp128* byval %a) {
+define void @f128_abs(fp128* noalias sret %scalar.result, fp128* byval(fp128) %a) {
entry:
%0 = load fp128, fp128* %a, align 8
%1 = tail call fp128 @llvm.fabs.f128(fp128 %0)
; BE: fnegs %f0, %f0
; EL: fnegs %f3, %f3
-define void @f128_neg(fp128* noalias sret %scalar.result, fp128* byval %a) {
+define void @f128_neg(fp128* noalias sret %scalar.result, fp128* byval(fp128) %a) {
entry:
%0 = load fp128, fp128* %a, align 8
%1 = fsub fp128 0xL00000000000000008000000000000000, %0
; V9: st %o0, [%[[R]]+{{.+}}]
; Function Attrs: nounwind
-define i32 @foo(%struct.jmpbuf_env* byval %inbuf) #0 {
+define i32 @foo(%struct.jmpbuf_env* byval(%struct.jmpbuf_env) %inbuf) #0 {
entry:
%0 = getelementptr inbounds %struct.jmpbuf_env, %struct.jmpbuf_env* %inbuf, i32 0, i32 0
store i32 0, i32* %0, align 4, !tbaa !4
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%0 = bitcast i32* %i.addr to %struct.S*
- call void @struct_ptr_fn(%struct.S* byval align 1 %0)
+ call void @struct_ptr_fn(%struct.S* byval(%struct.S) align 1 %0)
ret void
}
-declare void @struct_ptr_fn(%struct.S* byval align 1)
+declare void @struct_ptr_fn(%struct.S* byval(%struct.S) align 1)
; CHECK-LABEL: struct_test
; CHECK: call struct_fn
define void @struct_test() {
entry:
- tail call void @struct_fn(%struct.U* byval align 1 getelementptr inbounds ([1 x %struct.U], [1 x %struct.U]* @a, i32 0, i32 0))
+ tail call void @struct_fn(%struct.U* byval(%struct.U) align 1 getelementptr inbounds ([1 x %struct.U], [1 x %struct.U]* @a, i32 0, i32 0))
ret void
}
; CHECK-NEXT: nop
; CHECK-NEXT: ret
-declare void @struct_fn(%struct.U* byval align 1)
+declare void @struct_fn(%struct.U* byval(%struct.U) align 1)
@b = internal global [1 x %struct.U] zeroinitializer, align 1
define void @struct_arg_test() {
entry:
- tail call void @struct_arg_fn(%struct.U* byval align 1 getelementptr inbounds ([1 x %struct.U], [1 x %struct.U]* @b, i32 0, i32 0))
+ tail call void @struct_arg_fn(%struct.U* byval(%struct.U) align 1 getelementptr inbounds ([1 x %struct.U], [1 x %struct.U]* @b, i32 0, i32 0))
ret void
}
-declare void @struct_arg_fn(%struct.U* byval align 1)
+declare void @struct_arg_fn(%struct.U* byval(%struct.U) align 1)
%tmp20 = bitcast %struct.RRRRRRRR* %agg.tmp16 to i8*
%tmp21 = bitcast %struct.RRRRRRRR* %arrayidx19 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %tmp20, i8* align 4 %tmp21, i32 312, i1 false)
- call void (i8*, i32, i8*, i8*, ...) @CLLoggingLog(i8* %tmp, i32 2, i8* getelementptr inbounds ([62 x i8], [62 x i8]* @__PRETTY_FUNCTION__._ZN12CLGll, i32 0, i32 0), i8* getelementptr inbounds ([75 x i8], [75 x i8]* @.str, i32 0, i32 0), %struct.RRRRRRRR* byval %agg.tmp, %struct.RRRRRRRR* byval %agg.tmp4, %struct.RRRRRRRR* byval %agg.tmp10, %struct.RRRRRRRR* byval %agg.tmp16)
+ call void (i8*, i32, i8*, i8*, ...) @CLLoggingLog(i8* %tmp, i32 2, i8* getelementptr inbounds ([62 x i8], [62 x i8]* @__PRETTY_FUNCTION__._ZN12CLGll, i32 0, i32 0), i8* getelementptr inbounds ([75 x i8], [75 x i8]* @.str, i32 0, i32 0), %struct.RRRRRRRR* byval(%struct.RRRRRRRR) %agg.tmp, %struct.RRRRRRRR* byval(%struct.RRRRRRRR) %agg.tmp4, %struct.RRRRRRRR* byval(%struct.RRRRRRRR) %agg.tmp10, %struct.RRRRRRRR* byval(%struct.RRRRRRRR) %agg.tmp16)
br label %do.end
do.end: ; preds = %do.body
%c = alloca %struct.C, align 1
%0 = getelementptr inbounds %struct.C, %struct.C* %c, i32 0, i32 0, i32 0
call void @llvm.lifetime.start.p0i8(i64 1000, i8* %0) #1
- call void @use_C(%struct.C* byval %c) #3
+ call void @use_C(%struct.C* byval(%struct.C) %c) #3
call void @llvm.lifetime.end.p0i8(i64 1000, i8* %0) #1
ret void
}
%s = alloca %struct.S, align 2
%0 = bitcast %struct.S* %s to i8*
call void @llvm.lifetime.start.p0i8(i64 2000, i8* %0) #1
- call void @use_S(%struct.S* byval %s) #3
+ call void @use_S(%struct.S* byval(%struct.S) %s) #3
call void @llvm.lifetime.end.p0i8(i64 2000, i8* %0) #1
ret void
}
%i = alloca %struct.I, align 4
%0 = bitcast %struct.I* %i to i8*
call void @llvm.lifetime.start.p0i8(i64 4000, i8* %0) #1
- call void @use_I(%struct.I* byval %i) #3
+ call void @use_I(%struct.I* byval(%struct.I) %i) #3
call void @llvm.lifetime.end.p0i8(i64 4000, i8* %0) #1
ret void
}
-declare void @use_C(%struct.C* byval) #2
-declare void @use_S(%struct.S* byval) #2
-declare void @use_I(%struct.I* byval) #2
+declare void @use_C(%struct.C* byval(%struct.C)) #2
+declare void @use_S(%struct.S* byval(%struct.S)) #2
+declare void @use_I(%struct.I* byval(%struct.I)) #2
declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
ret void
}
-define void @arg_emergency_spill(i32 %n, i32 %n2, i32 %n3, i32 %n4, [252 x i32]* byval %p) {
+define void @arg_emergency_spill(i32 %n, i32 %n2, i32 %n3, i32 %n4, [252 x i32]* byval([252 x i32]) %p) {
; CHECK-LABEL: arg_emergency_spill:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r4, r5, r6, r7, lr}
; We currently overestimate the amount of required stack space by 16 bytes,
; so this is the largest stack that doesn't require an emergency spill slot.
-define void @arg_no_emergency_spill(i32 %n, i32 %n2, i32 %n3, i32 %n4, [248 x i32]* byval %p) {
+define void @arg_no_emergency_spill(i32 %n, i32 %n2, i32 %n3, i32 %n4, [248 x i32]* byval([248 x i32]) %p) {
; CHECK-LABEL: arg_no_emergency_spill:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r4, r5, r6, r7, lr}
ret void
}
-define void @aligned_emergency_spill(i32 %n, i32 %n2, i32 %n3, i32 %n4, [31 x i32]* byval %p) {
+define void @aligned_emergency_spill(i32 %n, i32 %n2, i32 %n3, i32 %n4, [31 x i32]* byval([31 x i32]) %p) {
; CHECK-LABEL: aligned_emergency_spill:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r4, r5, r6, r7, lr}
; This function should have no emergency spill slot, so its stack should be
; smaller than @aligned_emergency_spill.
-define void @aligned_no_emergency_spill(i32 %n, i32 %n2, i32 %n3, i32 %n4, [30 x i32]* byval %p) {
+define void @aligned_no_emergency_spill(i32 %n, i32 %n2, i32 %n3, i32 %n4, [30 x i32]* byval([30 x i32]) %p) {
; CHECK-LABEL: aligned_no_emergency_spill:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r4, r5, r6, r7, lr}
; so we don't generate code that requires an emergency spill slot we never
; allocated. If the store gets eliminated, this testcase probably needs
; to be rewritten.)
-define void @aligned_out_of_range_access(i32 %n, i32 %n2, i32 %n3, i32 %n4, [30 x i32]* byval %p) {
+define void @aligned_out_of_range_access(i32 %n, i32 %n2, i32 %n3, i32 %n4, [30 x i32]* byval([30 x i32]) %p) {
; CHECK-LABEL: aligned_out_of_range_access:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r4, r5, r6, r7, lr}
declare void @llvm.va_start(i8*)
declare dso_local i32 @g(i32*, i32, i32, i32, i32, i32) local_unnamed_addr
-declare dso_local i32 @f(i32*, i32, i32, i32, %struct.S* byval align 4) local_unnamed_addr
+declare dso_local i32 @f(i32*, i32, i32, i32, %struct.S* byval(%struct.S) align 4) local_unnamed_addr
declare dso_local i32 @h(i32*, i32*, i32*) local_unnamed_addr
-declare dso_local i32 @u(i32*, i32*, i32*, %struct.S* byval align 4, %struct.S* byval align 4) local_unnamed_addr
+declare dso_local i32 @u(i32*, i32*, i32*, %struct.S* byval(%struct.S) align 4, %struct.S* byval(%struct.S) align 4) local_unnamed_addr
;
; Test access to arguments, passed on stack (including varargs)
%arraydecay = getelementptr inbounds [4 x i32], [4 x i32]* %v, i32 0, i32 0
%add = add nsw i32 %c, %b
%add1 = add nsw i32 %add, %d
- %call = call i32 @f(i32* nonnull %arraydecay, i32 %a, i32 %add1, i32 %e, %struct.S* byval nonnull align 4 @s)
+ %call = call i32 @f(i32* nonnull %arraydecay, i32 %a, i32 %add1, i32 %e, %struct.S* byval(%struct.S) nonnull align 4 @s)
%add.ptr = getelementptr inbounds [4 x i32], [4 x i32]* %v, i32 0, i32 1
%add.ptr5 = getelementptr inbounds [4 x i32], [4 x i32]* %v, i32 0, i32 2
%call6 = call i32 @h(i32* nonnull %arraydecay, i32* nonnull %add.ptr, i32* nonnull %add.ptr5)
%1 = bitcast %struct.__va_list* %ap to i8*
call void @llvm.va_start(i8* nonnull %1)
%arraydecay = getelementptr inbounds [4 x i32], [4 x i32]* %v, i32 0, i32 0
- %call = call i32 @f(i32* nonnull %arraydecay, i32 %a, i32 0, i32 0, %struct.S* byval nonnull align 4 @s)
+ %call = call i32 @f(i32* nonnull %arraydecay, i32 %a, i32 0, i32 0, %struct.S* byval(%struct.S) nonnull align 4 @s)
%add.ptr = getelementptr inbounds [4 x i32], [4 x i32]* %v, i32 0, i32 1
%add.ptr5 = getelementptr inbounds [4 x i32], [4 x i32]* %v, i32 0, i32 2
%call6 = call i32 @h(i32* nonnull %arraydecay, i32* nonnull %add.ptr, i32* nonnull %add.ptr5)
%2 = bitcast i32* %y to i8*
%3 = bitcast i32* %z to i8*
%arraydecay = getelementptr inbounds [4 x i32], [4 x i32]* %v, i32 0, i32 0
- %call = call i32 @u(i32* nonnull %arraydecay, i32* nonnull %x, i32* nonnull %y, %struct.S* byval nonnull align 4 @s, %struct.S* byval nonnull align 4 @s)
- %call2 = call i32 @u(i32* nonnull %arraydecay, i32* nonnull %y, i32* nonnull %z, %struct.S* byval nonnull align 4 @s, %struct.S* byval nonnull align 4 @s)
+ %call = call i32 @u(i32* nonnull %arraydecay, i32* nonnull %x, i32* nonnull %y, %struct.S* byval(%struct.S) nonnull align 4 @s, %struct.S* byval(%struct.S) nonnull align 4 @s)
+ %call2 = call i32 @u(i32* nonnull %arraydecay, i32* nonnull %y, i32* nonnull %z, %struct.S* byval(%struct.S) nonnull align 4 @s, %struct.S* byval(%struct.S) nonnull align 4 @s)
%add = add nsw i32 %call2, %call
ret i32 %add
}
%EmptyStruct = type { }
declare void @ext_func(%SmallStruct*)
-declare void @ext_func_empty(%EmptyStruct* byval)
-declare void @ext_byval_func(%SmallStruct* byval)
-declare void @ext_byval_func_align8(%SmallStruct* byval align 8)
-declare void @ext_byval_func_alignedstruct(%AlignedStruct* byval)
-declare void @ext_byval_func_empty(%EmptyStruct* byval)
+declare void @ext_func_empty(%EmptyStruct* byval(%EmptyStruct))
+declare void @ext_byval_func(%SmallStruct* byval(%SmallStruct))
+declare void @ext_byval_func_align8(%SmallStruct* byval(%SmallStruct) align 8)
+declare void @ext_byval_func_alignedstruct(%AlignedStruct* byval(%AlignedStruct))
+declare void @ext_byval_func_empty(%EmptyStruct* byval(%EmptyStruct))
; CHECK-LABEL: byval_arg
define void @byval_arg(%SmallStruct* %ptr) {
; CHECK-NEXT: i32.const $push[[L5:.+]]=, 12{{$}}
; CHECK-NEXT: i32.add $push[[ARG:.+]]=, $[[SP]], $pop[[L5]]{{$}}
; CHECK-NEXT: call ext_byval_func, $pop[[ARG]]{{$}}
- call void @ext_byval_func(%SmallStruct* byval %ptr)
+ call void @ext_byval_func(%SmallStruct* byval(%SmallStruct) %ptr)
; Restore the stack
; CHECK-NEXT: i32.const $push[[L6:.+]]=, 16
; CHECK-NEXT: i32.add $push[[L8:.+]]=, $[[SP]], $pop[[L6]]
; CHECK-NEXT: i32.const $push[[L5:.+]]=, 8{{$}}
; CHECK-NEXT: i32.add $push[[ARG:.+]]=, $[[SP]], $pop[[L5]]{{$}}
; CHECK-NEXT: call ext_byval_func_align8, $pop[[ARG]]{{$}}
- call void @ext_byval_func_align8(%SmallStruct* byval align 8 %ptr)
+ call void @ext_byval_func_align8(%SmallStruct* byval(%SmallStruct) align 8 %ptr)
ret void
}
; CHECK-NEXT: i64.store 0($[[SP]]), $pop[[L4]]
; Pass a pointer to the stack slot to the function
; CHECK-NEXT: call ext_byval_func_alignedstruct, $[[SP]]
- tail call void @ext_byval_func_alignedstruct(%AlignedStruct* byval %ptr)
+ tail call void @ext_byval_func_alignedstruct(%AlignedStruct* byval(%AlignedStruct) %ptr)
ret void
}
; CHECK-LABEL: byval_param
-define void @byval_param(%SmallStruct* byval align 32 %ptr) {
+define void @byval_param(%SmallStruct* byval(%SmallStruct) align 32 %ptr) {
; CHECK: .functype byval_param (i32) -> ()
; %ptr is just a pointer to a struct, so pass it directly through
; CHECK: call ext_func, $0
define void @byval_empty_caller(%EmptyStruct* %ptr) {
; CHECK: .functype byval_empty_caller (i32) -> ()
; CHECK: call ext_byval_func_empty, $0
- call void @ext_byval_func_empty(%EmptyStruct* byval %ptr)
+ call void @ext_byval_func_empty(%EmptyStruct* byval(%EmptyStruct) %ptr)
ret void
}
; CHECK-LABEL: byval_empty_callee
-define void @byval_empty_callee(%EmptyStruct* byval %ptr) {
+define void @byval_empty_callee(%EmptyStruct* byval(%EmptyStruct) %ptr) {
; CHECK: .functype byval_empty_callee (i32) -> ()
; CHECK: call ext_func_empty, $0
call void @ext_func_empty(%EmptyStruct* %ptr)
; CHECK-NEXT: local.tee $push[[L9:.+]]=, $[[SP:.+]]=, $pop[[L11]]{{$}}
; CHECK-NEXT: call big_byval_callee,
%big = type [131072 x i8]
-declare void @big_byval_callee(%big* byval align 1)
-define void @big_byval(%big* byval align 1 %x) {
- call void @big_byval_callee(%big* byval align 1 %x)
+declare void @big_byval_callee(%big* byval(%big) align 1)
+define void @big_byval(%big* byval(%big) align 1 %x) {
+ call void @big_byval_callee(%big* byval(%big) align 1 %x)
ret void
}
declare i32 @extern_ijidf(i64, i32, double, float) #1
-declare void @extern_struct(%struct.big* byval align 8) #1
+declare void @extern_struct(%struct.big* byval(%struct.big) align 8) #1
declare void @extern_sret(%struct.big* sret) #1
; CHECK-LABEL: mismatched_byval:
; CHECK: i32.store
; CHECK: return_call quux, $pop{{[0-9]+}}{{$}}
-declare i32 @quux(i32* byval)
+declare i32 @quux(i32* byval(i32))
define i32 @mismatched_byval(i32* %x) {
- %v = tail call i32 @quux(i32* byval %x)
+ %v = tail call i32 @quux(i32* byval(i32) %x)
ret i32 %v
}
%struct.S63 = type { [63 x i8] }
@g1s63 = external global %struct.S63 ; <%struct.S63*> [#uses=1]
-declare void @test63(%struct.S63* byval align 4 ) nounwind
+declare void @test63(%struct.S63* byval(%struct.S63) align 4) nounwind
define void @testit63_entry_2E_ce() nounwind {
; CHECK-LABEL: testit63_entry_2E_ce:
; CHECK-NEXT: popl %esi
; CHECK-NEXT: popl %edi
; CHECK-NEXT: retl
- tail call void @test63( %struct.S63* byval align 4 @g1s63 ) nounwind
+ tail call void @test63(%struct.S63* byval(%struct.S63) align 4 @g1s63) nounwind
ret void
}
store i8 %5, i8* %7, align 1
%8 = getelementptr %struct.X, %struct.X* %xxx, i32 0, i32 0 ; <i8*> [#uses=1]
store i8 15, i8* %8, align 1
- %9 = call i32 (...) bitcast (i32 (%struct.X*, %struct.X*)* @f to i32 (...)*)(%struct.X* byval align 4 %xxx, %struct.X* byval align 4 %xxx) nounwind ; <i32> [#uses=1]
+ %9 = call i32 (...) bitcast (i32 (%struct.X*, %struct.X*)* @f to i32 (...)*)(%struct.X* byval(%struct.X) align 4 %xxx, %struct.X* byval(%struct.X) align 4 %xxx) nounwind ; <i32> [#uses=1]
store i32 %9, i32* %0, align 4
%10 = load i32, i32* %0, align 4 ; <i32> [#uses=1]
store i32 %10, i32* %retval, align 4
ret i32 %retval1
}
-declare i32 @f(%struct.X* byval align 4, %struct.X* byval align 4) nounwind ssp
+declare i32 @f(%struct.X* byval(%struct.X) align 4, %struct.X* byval(%struct.X) align 4) nounwind ssp
%struct.JVTLib_100487 = type <{ i8 }>
-define i32 @_Z13JVTLib_10335613JVTLib_10266513JVTLib_100579S_S_S_jPhj(i16* nocapture %ResidualX_Array.0, %struct.JVTLib_100487* nocapture byval align 4 %xqp, i16* nocapture %ResidualL_Array.0, i16* %ResidualDCZ_Array.0, i16* nocapture %ResidualACZ_FOArray.0, i32 %useFRextDequant, i8* nocapture %JVTLib_103357, i32 %use_field_scan) ssp {
+define i32 @_Z13JVTLib_10335613JVTLib_10266513JVTLib_100579S_S_S_jPhj(i16* nocapture %ResidualX_Array.0, %struct.JVTLib_100487* nocapture byval(%struct.JVTLib_100487) align 4 %xqp, i16* nocapture %ResidualL_Array.0, i16* %ResidualDCZ_Array.0, i16* nocapture %ResidualACZ_FOArray.0, i32 %useFRextDequant, i8* nocapture %JVTLib_103357, i32 %use_field_scan) ssp {
bb.nph:
%0 = shl i32 undef, 1 ; <i32> [#uses=2]
%mask133.masked.masked.masked.masked.masked.masked = or i640 undef, undef ; <i640> [#uses=1]
%struct.Pt = type { double, double }
%struct.Rect = type { %struct.Pt, %struct.Pt }
-define double @foo(%struct.Rect* byval %my_r0) nounwind ssp !dbg !1 {
+define double @foo(%struct.Rect* byval(%struct.Rect) %my_r0) nounwind ssp !dbg !1 {
entry:
%retval = alloca double ; <double*> [#uses=2]
%0 = alloca double ; <double*> [#uses=2]
; CHECK: movl %[[reg]],{{.*}}(%ebp) ## 4-byte Spill
; CHECK: calll __Z6throwsv
-define i8* @_Z4test1SiS_(%struct.S* byval %s1, i32 %n, %struct.S* byval %s2) ssp personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i8* @_Z4test1SiS_(%struct.S* byval(%struct.S) %s1, i32 %n, %struct.S* byval(%struct.S) %s2) ssp personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
entry:
%retval = alloca i8*, align 4 ; <i8**> [#uses=2]
%n.addr = alloca i32, align 4 ; <i32*> [#uses=1]
%struct.T0 = type {}
-define void @fn4(%struct.T0* byval %arg0) nounwind ssp {
+define void @fn4(%struct.T0* byval(%struct.T0) %arg0) nounwind ssp {
entry:
ret void
}
}
; Check that we fallback on byVal argument
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction: call: ' call void @ScaleObjectOverwrite_3(%struct.PointListStruct* %index, %struct.PointListStruct* byval %index)' (in function: ScaleObjectOverwrite_2)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction: call: ' call void @ScaleObjectOverwrite_3(%struct.PointListStruct* %index, %struct.PointListStruct* byval(%struct.PointListStruct) %index)' (in function: ScaleObjectOverwrite_2)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for ScaleObjectOverwrite_2
; FALLBACK-WITH-REPORT-OUT-LABEL: ScaleObjectOverwrite_2:
%struct.PointListStruct = type { i8*, i8* }
-declare void @ScaleObjectOverwrite_3(%struct.PointListStruct* %index, %struct.PointListStruct* byval %index2)
+declare void @ScaleObjectOverwrite_3(%struct.PointListStruct* %index, %struct.PointListStruct* byval(%struct.PointListStruct) %index2)
define void @ScaleObjectOverwrite_2(%struct.PointListStruct* %index) {
entry:
- call void @ScaleObjectOverwrite_3(%struct.PointListStruct* %index, %struct.PointListStruct* byval %index)
+ call void @ScaleObjectOverwrite_3(%struct.PointListStruct* %index, %struct.PointListStruct* byval(%struct.PointListStruct) %index)
ret void
}
%struct.__va_list_tag = type { i32, i32, i8*, i8* }
; Function Attrs: nounwind uwtable
-define void @bar(%struct.Baz* byval nocapture readnone align 8 %x, ...) {
+define void @bar(%struct.Baz* byval(%struct.Baz) nocapture readnone align 8 %x, ...) {
entry:
%va = alloca [1 x %struct.__va_list_tag], align 16
%arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %va, i64 0, i64 0
; CHECK: retl
-define void @avoid_byval(i32* byval %x) {
+define void @avoid_byval(i32* byval(i32) %x) {
entry:
%x.p.p = alloca i32*
store i32* %x, i32** %x.p.p
%struct.S6 = type { [4 x i32], i32, i32, i32, i32 }
; Function Attrs: nounwind uwtable
-define void @test_stack(%struct.S6* noalias nocapture sret %agg.result, %struct.S6* byval nocapture readnone align 8 %s1, %struct.S6* byval nocapture align 8 %s2, i32 %x) local_unnamed_addr #0 {
+define void @test_stack(%struct.S6* noalias nocapture sret %agg.result, %struct.S6* byval(%struct.S6) nocapture readnone align 8 %s1, %struct.S6* byval(%struct.S6) nocapture align 8 %s2, i32 %x) local_unnamed_addr #0 {
; CHECK-LABEL: test_stack:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rdi, %rax
@.str3 = private constant [7 x i8] c"test.c\00", align 1 ; <[7 x i8]*> [#uses=1]
@__PRETTY_FUNCTION__.2067 = internal constant [13 x i8] c"aligned_func\00" ; <[13 x i8]*> [#uses=1]
-define void @aligned_func(%struct.S* byval align 64 %obj) nounwind {
+define void @aligned_func(%struct.S* byval(%struct.S) align 64 %obj) nounwind {
entry:
%ptr = alloca i8* ; <i8**> [#uses=3]
%p = alloca i64 ; <i64*> [#uses=3]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%0 = getelementptr inbounds %struct.S, %struct.S* %s1, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 1, i32* %0, align 4
- call void @aligned_func(%struct.S* byval align 64 %s1) nounwind
+ call void @aligned_func(%struct.S* byval(%struct.S) align 64 %s1) nounwind
br label %return
return: ; preds = %entry
%struct.s = type { i64, i64, i64 }
-define i64 @f(%struct.s* byval %a) {
+define i64 @f(%struct.s* byval(%struct.s) %a) {
; X64-LABEL: f:
; X64: # %bb.0: # %entry
; X64-NEXT: movq 8(%rsp), %rax
store i64 %b, i64* %tmp2, align 16
%tmp4 = getelementptr %struct.s, %struct.s* %d, i32 0, i32 2
store i64 %c, i64* %tmp4, align 16
- call void @f( %struct.s* byval %d )
- call void @f( %struct.s* byval %d )
+ call void @f(%struct.s* byval(%struct.s) %d)
+ call void @f(%struct.s* byval(%struct.s) %d)
ret void
}
-declare void @f(%struct.s* byval)
+declare void @f(%struct.s* byval(%struct.s))
store i32 %a5, i32* %tmp8, align 16
%tmp10 = getelementptr %struct.s, %struct.s* %d, i32 0, i32 5
store i32 %a6, i32* %tmp10, align 16
- call void @f( %struct.s* byval %d)
- call void @f( %struct.s* byval %d)
+ call void @f(%struct.s* byval(%struct.s) %d)
+ call void @f(%struct.s* byval(%struct.s) %d)
ret void
}
-declare void @f(%struct.s* byval)
+declare void @f(%struct.s* byval(%struct.s))
store i16 %a5, i16* %tmp8, align 16
%tmp10 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 5
store i16 %a6, i16* %tmp10, align 16
- call void @f( %struct.s* byval %a )
- call void @f( %struct.s* byval %a )
+ call void @f(%struct.s* byval(%struct.s) %a)
+ call void @f(%struct.s* byval(%struct.s) %a)
ret void
}
-declare void @f(%struct.s* byval)
+declare void @f(%struct.s* byval(%struct.s))
store i8 %a5, i8* %tmp8, align 8
%tmp10 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 5
store i8 %a6, i8* %tmp10, align 8
- call void @f( %struct.s* byval %a )
- call void @f( %struct.s* byval %a )
+ call void @f(%struct.s* byval(%struct.s) %a)
+ call void @f(%struct.s* byval(%struct.s) %a)
ret void
}
-declare void @f(%struct.s* byval)
+declare void @f(%struct.s* byval(%struct.s))
; CHECK-NEXT: popl %ebx
; CHECK-NEXT: retl
entry:
- tail call void (i32, ...) @bar( i32 3, %struct.W* byval @.cpx ) nounwind
- tail call void (i32, ...) @baz( i32 3, %struct.W* byval @B ) nounwind
+ tail call void (i32, ...) @bar( i32 3, %struct.W* byval(%struct.W) @.cpx ) nounwind
+ tail call void (i32, ...) @baz( i32 3, %struct.W* byval(%struct.W) @B ) nounwind
ret i32 undef
}
%s = alloca %struct.S ; <%struct.S*> [#uses=2]
%tmp15 = getelementptr %struct.S, %struct.S* %s, i32 0, i32 0 ; <<2 x i64>*> [#uses=1]
store <2 x i64> < i64 8589934595, i64 1 >, <2 x i64>* %tmp15, align 16
- call void @t( i32 1, %struct.S* byval %s ) nounwind
+ call void @t( i32 1, %struct.S* byval(%struct.S) %s) nounwind
ret i32 0
}
-declare void @t(i32, %struct.S* byval )
+declare void @t(i32, %struct.S* byval(%struct.S))
; REQUIRES: asserts
; RUN: llc -mtriple=i686-- -no-integrated-as < %s -verify-machineinstrs -precompute-phys-liveness
; RUN: llc -mtriple=x86_64-- -no-integrated-as < %s -verify-machineinstrs -precompute-phys-liveness
-
+
; PR6497
; Chain and flag folding issues.
br label %bb29
bb28: ; preds = %bb7
- call void @_ZNSt6vectorIN4llvm11MachineMoveESaIS1_EE13_M_insert_auxEN9__gnu_cxx17__normal_iteratorIPS1_S3_EERKS1_(%t10* %tmp2, %t21* byval align 4 undef, %t13* undef)
+ call void @_ZNSt6vectorIN4llvm11MachineMoveESaIS1_EE13_M_insert_auxEN9__gnu_cxx17__normal_iteratorIPS1_S3_EERKS1_(%t10* %tmp2, %t21* byval(%t21) align 4 undef, %t13* undef)
br label %bb29
bb29: ; preds = %bb28, %bb27
br label %bb37
bb36: ; preds = %bb34
- call void @_ZNSt6vectorIN4llvm11MachineMoveESaIS1_EE13_M_insert_auxEN9__gnu_cxx17__normal_iteratorIPS1_S3_EERKS1_(%t10* %tmp2, %t21* byval align 4 undef, %t13* undef)
+ call void @_ZNSt6vectorIN4llvm11MachineMoveESaIS1_EE13_M_insert_auxEN9__gnu_cxx17__normal_iteratorIPS1_S3_EERKS1_(%t10* %tmp2, %t21* byval(%t21) align 4 undef, %t13* undef)
br label %bb37
bb37: ; preds = %bb36, %bb35, %bb31
declare %t14* @_ZN4llvm9MCContext16CreateTempSymbolEv(%t2*)
-declare void @_ZNSt6vectorIN4llvm11MachineMoveESaIS1_EE13_M_insert_auxEN9__gnu_cxx17__normal_iteratorIPS1_S3_EERKS1_(%t10*, %t21* byval align 4, %t13*)
+declare void @_ZNSt6vectorIN4llvm11MachineMoveESaIS1_EE13_M_insert_auxEN9__gnu_cxx17__normal_iteratorIPS1_S3_EERKS1_(%t10*, %t21* byval(%t21) align 4, %t13*)
declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
; CHECK-NOT: pushq
; CHECK: movl $42, %eax
; CHECK: retq
-define i32 @f0(%struct.s* byval align 8 %input) !dbg !8 {
+define i32 @f0(%struct.s* byval(%struct.s) align 8 %input) !dbg !8 {
call void @llvm.dbg.declare(metadata %struct.s* %input, metadata !4, metadata !17), !dbg !18
ret i32 42, !dbg !18
}
; CHECK: movl $42, %eax
; CHECK: popq %rbp
; CHECK: retq
-define i32 @f1(%struct.s* byval align 8 %input) !dbg !19 {
+define i32 @f1(%struct.s* byval(%struct.s) align 8 %input) !dbg !19 {
%val = load i64, i64* @glob
; this alloca should force FP usage.
%stackspace = alloca i32, i64 %val, align 1
; CHECK: andq $-64, %rsp
; CHECK: subq $64, %rsp
; CHECK: movq %rsp, %rbx
-define i32 @f2(%struct.s* byval align 8 %input) !dbg !22 {
+define i32 @f2(%struct.s* byval(%struct.s) align 8 %input) !dbg !22 {
%val = load i64, i64* @glob
%stackspace = alloca i32, i64 %val, align 64
store i32* %stackspace, i32** @ptr
; the base pointer we use the original adjustment.
%struct.struct_t = type { [5 x i32] }
-define void @t7(i32 %size, %struct.struct_t* byval align 8 %arg1) nounwind uwtable {
+define void @t7(i32 %size, %struct.struct_t* byval(%struct.struct_t) align 8 %arg1) nounwind uwtable {
entry:
%x = alloca i32, align 32
store i32 0, i32* %x, align 32
%0 = zext i32 %size to i64
%vla = alloca i32, i64 %0, align 16
%1 = load i32, i32* %x, align 32
- call void @bar(i32 %1, i32* %vla, %struct.struct_t* byval align 8 %arg1)
+ call void @bar(i32 %1, i32* %vla, %struct.struct_t* byval(%struct.struct_t) align 8 %arg1)
ret void
; CHECK: _t7
declare i8* @llvm.stacksave() nounwind
-declare void @bar(i32, i32*, %struct.struct_t* byval align 8)
+declare void @bar(i32, i32*, %struct.struct_t* byval(%struct.struct_t) align 8)
declare void @llvm.stackrestore(i8*) nounwind
%crd = type { i64, %cr* }
%pp = type { %cc }
-define fastcc void @foo(%pp* nocapture byval %p_arg) {
+define fastcc void @foo(%pp* nocapture byval(%pp) %p_arg) {
; CHECK-LABEL: foo:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: retl
%struct.s0 = type { x86_fp80, x86_fp80 }
; FastISel cannot handle this case yet. Make sure that we abort.
-define i8* @args_fail(%struct.s0* byval nocapture readonly align 16 %y) {
+define i8* @args_fail(%struct.s0* byval(%struct.s0) nocapture readonly align 16 %y) {
%1 = bitcast %struct.s0* %y to i8*
ret i8* %1
}
}
declare zeroext i1 @foo() nounwind
-declare void @foo2(%struct.s* byval)
+declare void @foo2(%struct.s* byval(%struct.s))
define void @test2(%struct.s* %d) nounwind {
- call void @foo2(%struct.s* byval %d )
+ call void @foo2(%struct.s* byval(%struct.s) %d )
ret void
; CHECK-LABEL: test2:
; CHECK: movl (%eax), %ecx
; CHECK-NEXT: addl $65536, %esp
; CHECK-NEXT: pushl %ecx
; CHECK-NEXT: retl
-define x86_thiscallcc void @thiscall_large(i32* %this, [65533 x i8]* byval %b) nounwind {
+define x86_thiscallcc void @thiscall_large(i32* %this, [65533 x i8]* byval([65533 x i8]) %b) nounwind {
ret void
}
; RUN: llc < %s -tailcallopt=false | FileCheck %s
-; CHECK: movl 8(%esp), %eax
-; CHECK: movl 8(%esp), %eax
-; CHECK-NOT: movl 8(%esp), %eax
+; CHECK: movl 8(%esp), %eax
+; CHECK: movl 8(%esp), %eax
+; CHECK-NOT: movl 8(%esp), %eax
; PR3122
; rdar://6400815
%V = alloca %struct.MVT
%a = getelementptr %struct.MVT, %struct.MVT* %V, i32 0, i32 0
store i32 1, i32* %a
- call fastcc void @foo(%struct.MVT* byval %V) nounwind
+ call fastcc void @foo(%struct.MVT* byval(%struct.MVT) %V) nounwind
%t = load i32, i32* %a
ret i32 %t
}
-declare fastcc void @foo(%struct.MVT* byval)
+declare fastcc void @foo(%struct.MVT* byval(%struct.MVT))
declare void @llvm.dbg.declare(metadata, metadata, metadata) #0
-define hidden void @foo(i32* byval %dstRect) {
+define hidden void @foo(i32* byval(i32) %dstRect) {
; CHECK-LABEL: name: foo
entry:
call void @llvm.dbg.declare(metadata i32* %dstRect, metadata !3, metadata !DIExpression()), !dbg !5
declare double @foo()
-define double @carg({ double, double }* byval %z) nounwind {
+define double @carg({ double, double }* byval({ double, double }) %z) nounwind {
; CHECK-LABEL: carg:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: jmp _foo ## TAILCALL
declare fp128 @copysignl(fp128, fp128) #1
; Test more complicated logical operations generated from copysignl.
-define void @TestCopySign({ fp128, fp128 }* noalias nocapture sret %agg.result, { fp128, fp128 }* byval nocapture readonly align 16 %z) #0 {
+define void @TestCopySign({ fp128, fp128 }* noalias nocapture sret %agg.result, { fp128, fp128 }* byval({ fp128, fp128 }) nocapture readonly align 16 %z) #0 {
; SSE-LABEL: TestCopySign:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rbp
%struct.Buffer = type { i8, [63 x i8] }
-define void @fn2NoDebug(%struct.Buffer* byval align 64 %p1) {
+define void @fn2NoDebug(%struct.Buffer* byval(%struct.Buffer) align 64 %p1) {
ret void
}
; CHECK-NEXT: .cfi_def_cfa %rsp, 8
; CHECK-NEXT: ret
-define void @fn2WithDebug(%struct.Buffer* byval align 64 %p1) !dbg !8 {
+define void @fn2WithDebug(%struct.Buffer* byval(%struct.Buffer) align 64 %p1) !dbg !8 {
call void @llvm.dbg.declare(metadata %struct.Buffer* %p1, metadata !9, metadata !6), !dbg !10
ret void
}
%struct.foo = type { [88 x i8] }
-declare void @bar(i8* nocapture, %struct.foo* align 4 byval) nounwind
+declare void @bar(i8* nocapture, %struct.foo* align 4 byval(%struct.foo)) nounwind
; PR19012
; Don't clobber %esi if we have inline asm that clobbers %esp.
define void @test1(%struct.foo* nocapture %x, i32 %y, i8* %z) nounwind {
- call void @bar(i8* %z, %struct.foo* align 4 byval %x)
+ call void @bar(i8* %z, %struct.foo* align 4 byval(%struct.foo) %x)
call void asm sideeffect inteldialect "xor esp, esp", "=*m,~{flags},~{esp},~{esp},~{dirflag},~{fpsr},~{flags}"(i8* %z)
ret void
ret double %rd
}
-define void @ret_large_struct(%struct.st12_t* noalias nocapture sret %agg.result, %struct.st12_t* byval nocapture readonly align 4 %r) #0 {
+define void @ret_large_struct(%struct.st12_t* noalias nocapture sret(%struct.st12_t) %agg.result, %struct.st12_t* byval(%struct.st12_t) nocapture readonly align 4 %r) #0 {
; CHECK-LABEL: ret_large_struct:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %esi
; Check that we don't crash due to a memcpy size type mismatch error ("Cannot
; emit physreg copy instruction") in X86InstrInfo::copyPhysReg.
%struct = type { [4096 x i8] }
-declare void @foo(%struct* byval)
+declare void @foo(%struct* byval(%struct))
define void @test2(%struct* %x) {
- call void @foo(%struct* byval %x)
+ call void @foo(%struct* byval(%struct) %x)
ret void
}
%struct.large = type { [4096 x i8] }
-declare void @foo(%struct.large* align 8 byval) nounwind
+declare void @foo(%struct.large* align 8 byval(%struct.large)) nounwind
define void @test1(%struct.large* nocapture %x) nounwind {
; NOFAST32-LABEL: test1:
; FAST-NEXT: callq foo
; FAST-NEXT: addq $4104, %rsp # imm = 0x1008
; FAST-NEXT: retq
- call void @foo(%struct.large* align 8 byval %x)
+ call void @foo(%struct.large* align 8 byval(%struct.large) %x)
ret void
}
; FAST-NEXT: callq foo
; FAST-NEXT: addq $4104, %rsp # imm = 0x1008
; FAST-NEXT: retq
- call void @foo(%struct.large* align 8 byval %x)
+ call void @foo(%struct.large* align 8 byval(%struct.large) %x)
ret void
}
%struct.large_oddsize = type { [4095 x i8] }
-declare void @foo_oddsize(%struct.large_oddsize* align 8 byval) nounwind
+declare void @foo_oddsize(%struct.large_oddsize* align 8 byval(%struct.large_oddsize)) nounwind
define void @test3(%struct.large_oddsize* nocapture %x) nounwind minsize {
; NOFAST32-LABEL: test3:
; FAST-NEXT: callq foo_oddsize
; FAST-NEXT: addq $4104, %rsp # imm = 0x1008
; FAST-NEXT: retq
- call void @foo_oddsize(%struct.large_oddsize* align 8 byval %x)
+ call void @foo_oddsize(%struct.large_oddsize* align 8 byval(%struct.large_oddsize) %x)
ret void
}
@.str100 = external hidden unnamed_addr constant [50 x i8], align 1
@__PRETTY_FUNCTION__._ZNK4llvm6SDNode10getOperandEj = external hidden unnamed_addr constant [66 x i8], align 1
-declare { %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"*, i32 } @_ZN4llvm12SelectionDAG7getNodeEjNS_5SDLocENS_3EVTENS_7SDValueES3_(%"class.llvm::SelectionDAG.104.704.1064.2024.2144.2384.4184"*, i32, i8*, i32, i32, %"class.llvm::Type.7.607.967.1927.2047.2287.4087"*, %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* byval align 8, %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* byval align 8)
+declare { %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"*, i32 } @_ZN4llvm12SelectionDAG7getNodeEjNS_5SDLocENS_3EVTENS_7SDValueES3_(%"class.llvm::SelectionDAG.104.704.1064.2024.2144.2384.4184"*, i32, i8*, i32, i32, %"class.llvm::Type.7.607.967.1927.2047.2287.4087"*, %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* byval(%"class.llvm::SDValue.3.603.963.1923.2043.2283.4083") align 8, %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* byval(%"class.llvm::SDValue.3.603.963.1923.2043.2283.4083") align 8)
; Function Attrs: noreturn nounwind
declare void @__assert_fail(i8*, i8*, i32, i8*) #0
; CHECK: movl $-1, %ecx
; CHECK: callq _ZN4llvm12SelectionDAG7getNodeEjNS_5SDLocENS_3EVTENS_7SDValueES3_
- %call18 = call { %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"*, i32 } @_ZN4llvm12SelectionDAG7getNodeEjNS_5SDLocENS_3EVTENS_7SDValueES3_(%"class.llvm::SelectionDAG.104.704.1064.2024.2144.2384.4184"* undef, i32 undef, i8* undef, i32 -1, i32 %retval.sroa.0.0.copyload.i37, %"class.llvm::Type.7.607.967.1927.2047.2287.4087"* undef, %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* byval align 8 undef, %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* byval align 8 undef) #1
+ %call18 = call { %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"*, i32 } @_ZN4llvm12SelectionDAG7getNodeEjNS_5SDLocENS_3EVTENS_7SDValueES3_(%"class.llvm::SelectionDAG.104.704.1064.2024.2144.2384.4184"* undef, i32 undef, i8* undef, i32 -1, i32 %retval.sroa.0.0.copyload.i37, %"class.llvm::Type.7.607.967.1927.2047.2287.4087"* undef, %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* byval(%"class.llvm::SDValue.3.603.963.1923.2043.2283.4083") align 8 undef, %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* byval(%"class.llvm::SDValue.3.603.963.1923.2043.2283.4083") align 8 undef) #1
ret { %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"*, i32 } %call18
}
declare void @eightparams(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h)
declare void @eightparams16(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e, i16 %f, i16 %g, i16 %h)
declare void @eightparams64(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64 %g, i64 %h)
-declare void @struct(%struct.s* byval %a, i32 %b, i32 %c, i32 %d)
+declare void @struct(%struct.s* byval(%struct.s) %a, i32 %b, i32 %c, i32 %d)
declare void @inalloca(<{ %struct.s }>* inalloca)
declare i8* @llvm.stacksave()
ret void
}
-; Check that pushing the addresses of globals (Or generally, things that
+; Check that pushing the addresses of globals (Or generally, things that
; aren't exactly immediates) isn't broken.
; Fixes PR21878.
; NORMAL-LABEL: test6:
; (because it has frame-index references), then we must resolve
; these references correctly.
; NORMAL-LABEL: test9:
-; NORMAL-NOT: leal (%esp),
+; NORMAL-NOT: leal (%esp),
; NORMAL: pushl $4
; NORMAL-NEXT: pushl $3
; NORMAL-NEXT: pushl $2
call void @good(i32 1, i32 2, i32 3, i32 4)
%pv = ptrtoint i32* %p to i32
%qv = ptrtoint i32* %q to i32
- call void @struct(%struct.s* byval %s, i32 6, i32 %qv, i32 %pv)
+ call void @struct(%struct.s* byval(%struct.s) %s, i32 6, i32 %qv, i32 %pv)
ret void
}
ret void
}
-; We can't fold the load from the global into the push because of
+; We can't fold the load from the global into the push because of
; interference from the store
; NORMAL-LABEL: test11:
; NORMAL: movl _the_global, [[EAX:%e..]]
ret void
}
-; Converting one mov into a push isn't worth it when
+; Converting one mov into a push isn't worth it when
; doing so forces too much overhead for other calls.
; NORMAL-LABEL: test12:
; NORMAL: pushl $8
; NORMAL-NEXT: calll _good
define void @test12() optsize {
entry:
- %s = alloca %struct.s, align 4
+ %s = alloca %struct.s, align 4
call void @struct(%struct.s* %s, i32 2, i32 3, i32 4)
call void @good(i32 5, i32 6, i32 7, i32 8)
call void @struct(%struct.s* %s, i32 10, i32 11, i32 12)
; NORMAL=NEXT: addl $16, %esp
define void @test12b() optsize {
entry:
- %s = alloca %struct.s, align 4
- call void @good(i32 1, i32 2, i32 3, i32 4)
+ %s = alloca %struct.s, align 4
+ call void @good(i32 1, i32 2, i32 3, i32 4)
call void @struct(%struct.s* %s, i32 6, i32 7, i32 8)
call void @good(i32 9, i32 10, i32 11, i32 12)
ret void
; NORMAL: retl
%struct.A = type { i32, i32 }
%struct.B = type { i8 }
-declare x86_thiscallcc %struct.B* @B_ctor(%struct.B* returned, %struct.A* byval)
+declare x86_thiscallcc %struct.B* @B_ctor(%struct.B* returned, %struct.A* byval(%struct.A))
declare void @B_func(%struct.B* sret, %struct.B*, i32)
define void @test14(%struct.A* %a) {
entry:
%0 = bitcast %struct.A* %a to i64*
%1 = load i64, i64* %0, align 4
store i64 %1, i64* %agg.tmp, align 4
- %call = call x86_thiscallcc %struct.B* @B_ctor(%struct.B* %ref.tmp, %struct.A* byval %tmpcast)
+ %call = call x86_thiscallcc %struct.B* @B_ctor(%struct.B* %ref.tmp, %struct.A* byval(%struct.A) %tmpcast)
%2 = getelementptr inbounds %struct.B, %struct.B* %tmp, i32 0, i32 0
call void @B_func(%struct.B* sret %tmp, %struct.B* %ref.tmp, i32 1)
ret void
; NORMAL-NEXT: addl $32, %esp
;
; NOPUSH-LABEL: pr34863_16
-; NOPUSH: subl $32, %esp
-; NOPUSH-NEXT: movl 36(%esp), %eax
-; NOPUSH-NEXT: movl %eax, 20(%esp)
-; NOPUSH-NEXT: movl %eax, 16(%esp)
-; NOPUSH-NEXT: movl %eax, 12(%esp)
-; NOPUSH-NEXT: movl %eax, 8(%esp)
-; NOPUSH-NEXT: movl %eax, 4(%esp)
-; NOPUSH-NEXT: movl %eax, (%esp)
+; NOPUSH: subl $32, %esp
+; NOPUSH-NEXT: movl 36(%esp), %eax
+; NOPUSH-NEXT: movl %eax, 20(%esp)
+; NOPUSH-NEXT: movl %eax, 16(%esp)
+; NOPUSH-NEXT: movl %eax, 12(%esp)
+; NOPUSH-NEXT: movl %eax, 8(%esp)
+; NOPUSH-NEXT: movl %eax, 4(%esp)
+; NOPUSH-NEXT: movl %eax, (%esp)
; NOPUSH-NEXT: movl $65535, 28(%esp)
-; NOPUSH-NEXT: andl $0, 24(%esp)
-; NOPUSH-NEXT: calll _eightparams16
+; NOPUSH-NEXT: andl $0, 24(%esp)
+; NOPUSH-NEXT: calll _eightparams16
; NOPUSH-NEXT: addl $32, %esp
define void @pr34863_16(i16 %x) minsize nounwind {
entry:
; NORMAL-NEXT: addl $32, %esp
;
; NOPUSH-LABEL: pr34863_32
-; NOPUSH: subl $32, %esp
+; NOPUSH: subl $32, %esp
; NOPUSH-NEXT: movl 36(%esp), %eax
; NOPUSH-NEXT: movl %eax, 20(%esp)
; NOPUSH-NEXT: movl %eax, 16(%esp)
; NOPUSH-NEXT: movl %eax, 12(%esp)
-; NOPUSH-NEXT: movl %eax, 8(%esp)
-; NOPUSH-NEXT: movl %eax, 4(%esp)
-; NOPUSH-NEXT: movl %eax, (%esp)
-; NOPUSH-NEXT: orl $-1, 28(%esp)
-; NOPUSH-NEXT: andl $0, 24(%esp)
-; NOPUSH-NEXT: calll _eightparams
-; NOPUSH-NEXT: addl $32, %esp
+; NOPUSH-NEXT: movl %eax, 8(%esp)
+; NOPUSH-NEXT: movl %eax, 4(%esp)
+; NOPUSH-NEXT: movl %eax, (%esp)
+; NOPUSH-NEXT: orl $-1, 28(%esp)
+; NOPUSH-NEXT: andl $0, 24(%esp)
+; NOPUSH-NEXT: calll _eightparams
+; NOPUSH-NEXT: addl $32, %esp
define void @pr34863_32(i32 %x) minsize nounwind {
entry:
tail call void @eightparams(i32 %x, i32 %x, i32 %x, i32 %x, i32 %x, i32 %x, i32 0, i32 -1)
; NORMAL-NEXT: addl $64, %esp
;
; NOPUSH-LABEL: pr34863_64
-; NOPUSH: subl $64, %esp
+; NOPUSH: subl $64, %esp
; NOPUSH-NEXT: movl 68(%esp), %eax
; NOPUSH-NEXT: movl 72(%esp), %ecx
; NOPUSH-NEXT: movl %ecx, 44(%esp)
; NOPUSH-NEXT: movl %ecx, 20(%esp)
; NOPUSH-NEXT: movl %eax, 16(%esp)
; NOPUSH-NEXT: movl %ecx, 12(%esp)
-; NOPUSH-NEXT: movl %eax, 8(%esp)
-; NOPUSH-NEXT: movl %ecx, 4(%esp)
-; NOPUSH-NEXT: movl %eax, (%esp)
-; NOPUSH-NEXT: orl $-1, 60(%esp)
-; NOPUSH-NEXT: orl $-1, 56(%esp)
-; NOPUSH-NEXT: andl $0, 52(%esp)
-; NOPUSH-NEXT: andl $0, 48(%esp)
+; NOPUSH-NEXT: movl %eax, 8(%esp)
+; NOPUSH-NEXT: movl %ecx, 4(%esp)
+; NOPUSH-NEXT: movl %eax, (%esp)
+; NOPUSH-NEXT: orl $-1, 60(%esp)
+; NOPUSH-NEXT: orl $-1, 56(%esp)
+; NOPUSH-NEXT: andl $0, 52(%esp)
+; NOPUSH-NEXT: andl $0, 48(%esp)
; NOPUSH-NEXT: calll _eightparams64
-; NOPUSH-NEXT: addl $64, %esp
+; NOPUSH-NEXT: addl $64, %esp
define void @pr34863_64(i64 %x) minsize nounwind {
entry:
tail call void @eightparams64(i64 %x, i64 %x, i64 %x, i64 %x, i64 %x, i64 %x, i64 0, i64 -1)
declare void @good(i32, i32, i32, i32)
- declare void @struct(%struct.s* byval, i32, i32, i32)
+ declare void @struct(%struct.s* byval(%struct.s), i32, i32, i32)
; Function Attrs: optsize
define void @test9() #0 {
call void @good(i32 1, i32 2, i32 3, i32 4)
%pv = ptrtoint i32* %p to i32
%qv = ptrtoint i32* %q to i32
- call void @struct(%struct.s* byval %s, i32 6, i32 %qv, i32 %pv)
+ call void @struct(%struct.s* byval(%struct.s) %s, i32 6, i32 %qv, i32 %pv)
ret void
}
declare %"struct.FixedMatrixBase<double,6,6>"* @_ZN15FixedMatrixBaseIdLi6ELi6EEmIERKS0_(%"struct.FixedMatrixBase<double,6,6>"*, %"struct.FixedMatrixBase<double,6,6>"*)
-declare void @_ZN13CDSVectorBaseI4Vec3N3CDS12DefaultAllocEEC2EiS2_(%"struct.CDSVectorBase<Vec3,CDS::DefaultAlloc>"*, i32, %"struct.CDS::DefaultAlloc"* byval align 4)
+declare void @_ZN13CDSVectorBaseI4Vec3N3CDS12DefaultAllocEEC2EiS2_(%"struct.CDSVectorBase<Vec3,CDS::DefaultAlloc>"*, i32, %"struct.CDS::DefaultAlloc"* byval(%"struct.CDS::DefaultAlloc") align 4)
declare void @_ZN13CDSVectorBaseI4Vec3N3CDS12DefaultAllocEED2Ev(%"struct.CDSVectorBase<Vec3,CDS::DefaultAlloc>"*)
%2 = load i32, i32* @g_b, align 4, !tbaa !3
%3 = load i32, i32* @g_a, align 4, !tbaa !3
%call = tail call i32 @bar(i32 %3, i32 %2, i32 %1, i32 %0) #2
- tail call void @foo(%struct._param_str* byval nonnull align 4 @g_param) #2
+ tail call void @foo(%struct._param_str* byval(%struct._param_str) nonnull align 4 @g_param) #2
ret i32 0
}
declare dso_local i32 @bar(i32, i32, i32, i32) local_unnamed_addr
-declare dso_local void @foo(%struct._param_str* byval align 4) local_unnamed_addr
+declare dso_local void @foo(%struct._param_str* byval(%struct._param_str) align 4) local_unnamed_addr
!3 = !{!4, !4, i64 0}
!4 = !{!"int", !5, i64 0}
; We can fold the 16-byte constant load into either 'xor' instruction,
; but we do not. It has more than one use, so it gets loaded into a register.
-define void @foo(%struct.anon* byval %p) nounwind {
+define void @foo(%struct.anon* byval(%struct.anon) %p) nounwind {
; CHECK-LABEL: foo:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: subl $28, %esp
%struct.face = type { [7 x i32] }
; Function Attrs: noinline nounwind uwtable
-declare void @bar(%struct.face* byval nocapture readonly align 8);
+declare void @bar(%struct.face* byval(%struct.face) nocapture readonly align 8);
; Function Attrs: noinline nounwind uwtable
-define void @foo(%struct.face* byval nocapture align 8) local_unnamed_addr {
+define void @foo(%struct.face* byval(%struct.face) nocapture align 8) local_unnamed_addr {
; CHECK-LABEL: foo:
; CHECK: # %bb.0:
; CHECK-NEXT: subq $40, %rsp
store <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32>* %2, align 8
%3 = getelementptr inbounds %struct.face, %struct.face* %0, i64 0, i32 0, i64 4
store i32 1, i32* %3, align 8
- call void @bar(%struct.face* byval nonnull align 8 %0)
+ call void @bar(%struct.face* byval(%struct.face) nonnull align 8 %0)
ret void
}
%byval-temp = alloca %struct.a, align 8
%0 = bitcast %struct.a* %byval-temp to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* nonnull align 8 %0, i8* align 4 bitcast (%struct.a* @c to i8*), i32 260, i1 false)
- call void @d(%struct.a* byval nonnull align 8 %byval-temp)
+ call void @d(%struct.a* byval(%struct.a) nonnull align 8 %byval-temp)
ret void
}
-declare void @d(%struct.a* byval align 8) local_unnamed_addr #1
+declare void @d(%struct.a* byval(%struct.a) align 8) local_unnamed_addr #1
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1)
%struct.p = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
-define i32 @f(%struct.p* byval align 4 %q) nounwind ssp {
+define i32 @f(%struct.p* byval(%struct.p) align 4 %q) nounwind ssp {
entry:
; 32: _f:
; 32: jmp _g
; 64: _f:
; 64: jmp _g
- %call = tail call i32 @g(%struct.p* byval align 4 %q) nounwind
+ %call = tail call i32 @g(%struct.p* byval(%struct.p) align 4 %q) nounwind
ret i32 %call
}
-declare i32 @g(%struct.p* byval align 4)
+declare i32 @g(%struct.p* byval(%struct.p) align 4)
-define i32 @h(%struct.p* byval align 4 %q, i32 %r) nounwind ssp {
+define i32 @h(%struct.p* byval(%struct.p) align 4 %q, i32 %r) nounwind ssp {
entry:
; 32: _h:
; 32: jmp _i
; 64: _h:
; 64: jmp _i
- %call = tail call i32 @i(%struct.p* byval align 4 %q, i32 %r) nounwind
+ %call = tail call i32 @i(%struct.p* byval(%struct.p) align 4 %q, i32 %r) nounwind
ret i32 %call
}
-declare i32 @i(%struct.p* byval align 4, i32)
+declare i32 @i(%struct.p* byval(%struct.p) align 4, i32)
%struct.t = type { i32, i32, i32, i32, i32 }
-define i32 @t12(i32 %x, i32 %y, %struct.t* byval align 4 %z) nounwind ssp {
+define i32 @t12(i32 %x, i32 %y, %struct.t* byval(%struct.t) align 4 %z) nounwind ssp {
; X86-LABEL: t12:
; X86: # %bb.0: # %entry
; X86-NEXT: cmpl $0, {{[0-9]+}}(%esp)
br i1 %0, label %bb2, label %bb
bb:
- %1 = tail call i32 @foo6(i32 %x, i32 %y, %struct.t* byval align 4 %z) nounwind
+ %1 = tail call i32 @foo6(i32 %x, i32 %y, %struct.t* byval(%struct.t) align 4 %z) nounwind
ret i32 %1
bb2:
ret i32 0
}
-declare i32 @foo6(i32, i32, %struct.t* byval align 4)
+declare i32 @foo6(i32, i32, %struct.t* byval(%struct.t) align 4)
; rdar://r7717598
%struct.ns = type { i32, i32 }
; X32-NEXT: popq %rcx
; X32-NEXT: retq
entry:
- %0 = tail call fastcc %struct.ns* @foo7(%struct.cp* byval align 4 %yy, i8 signext 0) nounwind
+ %0 = tail call fastcc %struct.ns* @foo7(%struct.cp* byval(%struct.cp) align 4 %yy, i8 signext 0) nounwind
ret %struct.ns* %0
}
; rdar://6195379
; llvm can't do sibcall for this in 32-bit mode (yet).
-declare fastcc %struct.ns* @foo7(%struct.cp* byval align 4, i8 signext) nounwind ssp
+declare fastcc %struct.ns* @foo7(%struct.cp* byval(%struct.cp) align 4, i8 signext) nounwind ssp
%struct.__block_descriptor = type { i64, i64 }
%struct.__block_descriptor_withcopydispose = type { i64, i64, i8*, i8* }
attributes #0 = { nounwind uwtable "frame-pointer"="all" }
-define i32 @test1(i64 %n, %Foo* byval nocapture readnone align 8 %f) #0 {
+define i32 @test1(i64 %n, %Foo* byval(%Foo) nocapture readnone align 8 %f) #0 {
entry:
%buf = alloca [5 x i8*], align 16
%p = alloca i8*, align 8
; RUN: llc < %s -stack-symbol-ordering=0 -frame-pointer=all -mtriple=x86_64-pc-linux-gnu -mcpu=corei7 -o - | FileCheck %s
; This test is fairly fragile. The goal is to ensure that "large" stack
-; objects are allocated closest to the stack protector (i.e., farthest away
+; objects are allocated closest to the stack protector (i.e., farthest away
; from the Stack Pointer.) In standard SSP mode this means that large (>=
; ssp-buffer-size) arrays and structures containing such arrays are
; closet to the protector. With sspstrong and sspreq this means large
%coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
%7 = bitcast [2 x i16]* %coerce.dive26 to i32*
%8 = load i32, i32* %7, align 1
- call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
+ call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval(%struct.struct_large_nonchar) align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
ret void
}
; -56 large_char . arrays >= ssp-buffer-size
; -64 struct_large_char .
; -96 struct_large_nonchar .
-; -100 small_non_char | Group 2, nested arrays,
+; -100 small_non_char | Group 2, nested arrays,
; -102 small_char | arrays < ssp-buffer-size
; -104 struct_small_char |
; -112 struct_small_nonchar |
; -120 scalar + Group 4, everything else
; -124 scalar +
; -128 scalar +
-;
+;
; CHECK: layout_sspstrong:
; CHECK: call{{l|q}} get_scalar1
; CHECK: movl %eax, -120(
%coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
%7 = bitcast [2 x i16]* %coerce.dive26 to i32*
%8 = load i32, i32* %7, align 1
- call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
+ call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval(%struct.struct_large_nonchar) align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
ret void
}
define void @layout_sspreq() nounwind uwtable sspreq {
entry:
; Expected stack layout for sspreq is the same as sspstrong
-;
+;
; CHECK: layout_sspreq:
; CHECK: call{{l|q}} get_scalar1
; CHECK: movl %eax, -120(
%coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
%7 = bitcast [2 x i16]* %coerce.dive26 to i32*
%8 = load i32, i32* %7, align 1
- call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
+ call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval(%struct.struct_large_nonchar) align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
ret void
}
declare signext i16 @get_struct_small_nonchar()
declare void @end_struct_small_nonchar()
-declare void @takes_all(i64, i16, %struct.struct_large_nonchar* byval align 8, i32, i8*, i8*, i32*, i16*, i32*, i32, i32, i32)
+declare void @takes_all(i64, i16, %struct.struct_large_nonchar* byval(%struct.struct_large_nonchar) align 8, i32, i8*, i8*, i32*, i16*, i32*, i32, i32, i32)
declare void @takes_two(i32, i8*)
%struct.foo = type { [88 x i8] }
-declare void @bar(i8* nocapture, %struct.foo* align 4 byval) nounwind
+declare void @bar(i8* nocapture, %struct.foo* align 4 byval(%struct.foo)) nounwind
declare void @baz(i8*) nounwind
; PR15249
; CHECK-NEXT: popl %ebp
; CHECK-NEXT: retl
%dynalloc = alloca i8, i32 %y, align 1
- call void @bar(i8* %dynalloc, %struct.foo* align 4 byval %x)
+ call void @bar(i8* %dynalloc, %struct.foo* align 4 byval(%struct.foo) %x)
ret void
}
; CHECK-NEXT: popl %ebx
; CHECK-NEXT: popl %ebp
; CHECK-NEXT: retl
- call void @bar(i8* %z, %struct.foo* align 4 byval %x)
+ call void @bar(i8* %z, %struct.foo* align 4 byval(%struct.foo) %x)
%dynalloc = alloca i8, i32 %y, align 1
call void @baz(i8* %dynalloc)
ret void
; CHECK-NEXT: popl %edi
; CHECK-NEXT: popl %ebp
; CHECK-NEXT: retl
- call void @bar(i8* %z, %struct.foo* align 4 byval %x)
+ call void @bar(i8* %z, %struct.foo* align 4 byval(%struct.foo) %x)
%statalloc = alloca i8, i32 8, align 1
call void @baz(i8* %statalloc)
ret void
target triple = "i686-apple-darwin8"
@G = external global double
-define void @test({ double, double }* byval %z, double* %P) nounwind {
+define void @test({ double, double }* byval({ double, double }) %z, double* %P) nounwind {
entry:
%tmp3 = load double, double* @G, align 16 ; <double> [#uses=1]
%tmp4 = tail call double @fabs( double %tmp3 ) readnone ; <double> [#uses=1]
; Accessing stack parameters shouldn't assume stack alignment. Here we should
; emit two 8-byte loads, followed by two 8-byte stores.
-define x86_stdcallcc void @test5(%struct.sixteen* byval nocapture readonly align 4 %s) #0 {
+define x86_stdcallcc void @test5(%struct.sixteen* byval(%struct.sixteen) nocapture readonly align 4 %s) #0 {
%d.sroa.0 = alloca [16 x i8], align 1
%1 = getelementptr inbounds [16 x i8], [16 x i8]* %d.sroa.0, i32 0, i32 0
call void @llvm.lifetime.start.p0i8(i64 16, i8* %1)
%struct2 = type { i64, i64, i64 }
-declare void @consume_attributes(i32, i8* nest, i32, %struct2* byval)
+declare void @consume_attributes(i32, i8* nest, i32, %struct2* byval(%struct2))
-define void @test_attributes(%struct2* byval %s) gc "statepoint-example" {
+define void @test_attributes(%struct2* byval(%struct2) %s) gc "statepoint-example" {
; CHECK-LABEL: test_attributes:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
entry:
; Check that arguments with attributes are lowered correctly.
; We call a function that has a nest argument and a byval argument.
- %statepoint_token = call token (i64, i32, void (i32, i8*, i32, %struct2*)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidi32p0i8i32p0s_struct2sf(i64 0, i32 0, void (i32, i8*, i32, %struct2*)* @consume_attributes, i32 4, i32 0, i32 42, i8* nest null, i32 17, %struct2* byval %s, i32 0, i32 0)
+ %statepoint_token = call token (i64, i32, void (i32, i8*, i32, %struct2*)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidi32p0i8i32p0s_struct2sf(i64 0, i32 0, void (i32, i8*, i32, %struct2*)* @consume_attributes, i32 4, i32 0, i32 42, i8* nest null, i32 17, %struct2* byval(%struct2) %s, i32 0, i32 0)
ret void
}
declare void @use(%struct*)
-define void @test_fixed_arg(%struct* byval %x) gc "statepoint-example" {
+define void @test_fixed_arg(%struct* byval(%struct) %x) gc "statepoint-example" {
; CHECK-LABEL: test_fixed_arg
; CHECK: pushq %rax
; CHECK: leaq 16(%rsp), %rdi
i32, i32, i32, i32, i32, i32, i32, i32,
i32, i32, i32, i32, i32, i32, i32, i32 }
-define fastcc i32 @tailcallee(%struct.s* byval %a) nounwind {
+define fastcc i32 @tailcallee(%struct.s* byval(%struct.s) %a) nounwind {
entry:
%tmp2 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 0
%tmp3 = load i32, i32* %tmp2
; CHECK: movl 4(%esp), %eax
}
-define fastcc i32 @tailcaller(%struct.s* byval %a) nounwind {
+define fastcc i32 @tailcaller(%struct.s* byval(%struct.s) %a) nounwind {
entry:
- %tmp4 = tail call fastcc i32 @tailcallee(%struct.s* byval %a )
+ %tmp4 = tail call fastcc i32 @tailcallee(%struct.s* byval(%struct.s) %a )
ret i32 %tmp4
; CHECK: tailcaller
; CHECK: jmp tailcallee
; A sequence of copyto/copyfrom virtual registers is used to deal with byval
; lowering appearing after moving arguments to registers. The following two
; checks verify that the register allocator changes those sequences to direct
-; moves to argument register where it can (for registers that are not used in
+; moves to argument register where it can (for registers that are not used in
; byval lowering - not rsi, not rdi, not rcx).
; Expect argument 4 to be moved directly to register edx.
; CHECK: movl $7, %edx
i64, i64, i64, i64, i64, i64, i64, i64,
i64, i64, i64, i64, i64, i64, i64, i64 }
-declare fastcc i64 @tailcallee(%struct.s* byval %a, i64 %val, i64 %val2, i64 %val3, i64 %val4, i64 %val5)
+declare fastcc i64 @tailcallee(%struct.s* byval(%struct.s) %a, i64 %val, i64 %val2, i64 %val3, i64 %val4, i64 %val5)
-define fastcc i64 @tailcaller(i64 %b, %struct.s* byval %a) {
+define fastcc i64 @tailcaller(i64 %b, %struct.s* byval(%struct.s) %a) {
entry:
%tmp2 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 1
%tmp3 = load i64, i64* %tmp2, align 8
- %tmp4 = tail call fastcc i64 @tailcallee(%struct.s* byval %a , i64 %tmp3, i64 %b, i64 7, i64 13, i64 17)
+ %tmp4 = tail call fastcc i64 @tailcallee(%struct.s* byval(%struct.s) %a , i64 %tmp3, i64 %b, i64 7, i64 13, i64 17)
ret i64 %tmp4
}
i32, i32, i32, i32, i32, i32, i32, i32,
i32, i32, i32, i32, i32, i32, i32, i32 }
-define tailcc i32 @tailcallee(%struct.s* byval %a) nounwind {
+define tailcc i32 @tailcallee(%struct.s* byval(%struct.s) %a) nounwind {
entry:
%tmp2 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 0
%tmp3 = load i32, i32* %tmp2
; CHECK: movl 4(%esp), %eax
}
-define tailcc i32 @tailcaller(%struct.s* byval %a) nounwind {
+define tailcc i32 @tailcaller(%struct.s* byval(%struct.s) %a) nounwind {
entry:
- %tmp4 = tail call tailcc i32 @tailcallee(%struct.s* byval %a )
+ %tmp4 = tail call tailcc i32 @tailcallee(%struct.s* byval(%struct.s) %a )
ret i32 %tmp4
; CHECK: tailcaller
; CHECK: jmp tailcallee
; A sequence of copyto/copyfrom virtual registers is used to deal with byval
; lowering appearing after moving arguments to registers. The following two
; checks verify that the register allocator changes those sequences to direct
-; moves to argument register where it can (for registers that are not used in
+; moves to argument register where it can (for registers that are not used in
; byval lowering - not rsi, not rdi, not rcx).
; Expect argument 4 to be moved directly to register edx.
; CHECK: movl $7, %edx
i64, i64, i64, i64, i64, i64, i64, i64,
i64, i64, i64, i64, i64, i64, i64, i64 }
-declare tailcc i64 @tailcallee(%struct.s* byval %a, i64 %val, i64 %val2, i64 %val3, i64 %val4, i64 %val5)
+declare tailcc i64 @tailcallee(%struct.s* byval(%struct.s) %a, i64 %val, i64 %val2, i64 %val3, i64 %val4, i64 %val5)
-define tailcc i64 @tailcaller(i64 %b, %struct.s* byval %a) {
+define tailcc i64 @tailcaller(i64 %b, %struct.s* byval(%struct.s) %a) {
entry:
%tmp2 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 1
%tmp3 = load i64, i64* %tmp2, align 8
- %tmp4 = tail call tailcc i64 @tailcallee(%struct.s* byval %a , i64 %tmp3, i64 %b, i64 7, i64 13, i64 17)
+ %tmp4 = tail call tailcc i64 @tailcallee(%struct.s* byval(%struct.s) %a , i64 %tmp3, i64 %b, i64 7, i64 13, i64 17)
ret i64 %tmp4
}
; RUN: llc -mtriple x86_64-w64-mingw32 %s -o - | FileCheck %s
-declare void @foo({ float, double }* byval)
+declare void @foo({ float, double }* byval({ float, double }))
@G = external constant { float, double }
define void @bar()
; CHECK: movq %rax, 40(%rsp)
; CHECK: movq %rcx, 32(%rsp)
; CHECK: leaq 32(%rsp), %rcx
- call void @foo({ float, double }* byval @G)
+ call void @foo({ float, double }* byval({ float, double }) @G)
ret void
}
-define void @baz({ float, double }* byval %arg)
+define void @baz({ float, double }* byval({ float, double }) %arg)
{
; On Win64 the byval is effectively ignored on declarations, since we do
; pass a real pointer in registers. However, by our semantics if we pass
; CHECK: movq %rcx, 40(%rsp)
; CHECK: movq %rax, 32(%rsp)
; CHECK: leaq 32(%rsp), %rcx
- call void @foo({ float, double }* byval %arg)
+ call void @foo({ float, double }* byval({ float, double }) %arg)
ret void
}
-declare void @foo2({ float, double }* byval, { float, double }* byval, { float, double }* byval, { float, double }* byval, { float, double }* byval, i64 %f)
+declare void @foo2({ float, double }* byval({ float, double }), { float, double }* byval({ float, double }), { float, double }* byval({ float, double }), { float, double }* byval({ float, double }), { float, double }* byval({ float, double }), i64 %f)
@data = external constant { float, double }
define void @test() {
; CHECK-NEXT: leaq 96(%rsp), %rdx
; CHECK-NEXT: leaq 80(%rsp), %r8
; CHECK-NEXT: leaq 64(%rsp), %r9
- call void @foo2({ float, double }* byval @G, { float, double }* byval @G, { float, double }* byval @G, { float, double }* byval @G, { float, double }* byval @G, i64 10)
+ call void @foo2({ float, double }* byval({ float, double }) @G, { float, double }* byval({ float, double }) @G, { float, double }* byval({ float, double }) @G, { float, double }* byval({ float, double }) @G, { float, double }* byval({ float, double }) @G, i64 10)
ret void
}
target datalayout = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"
target triple = "i386-pc-windows-msvc"
-define x86_fastcallcc i32 @test1(i32 inreg %V, [65533 x i8]* byval %p_arg) {
+define x86_fastcallcc i32 @test1(i32 inreg %V, [65533 x i8]* byval([65533 x i8]) %p_arg) {
ret i32 %V
}
; CHECK-LABEL: @test1@65540:
; CHECK-NEXT: pushl %ecx
; CHECK-NEXT: retl
-define x86_stdcallcc void @test2([65533 x i8]* byval %p_arg) {
+define x86_stdcallcc void @test2([65533 x i8]* byval([65533 x i8]) %p_arg) {
ret void
}
; CHECK-LABEL: _test2@65536:
; CHECK: retsp 1
%struct.st0 = type { [0 x i32] }
declare void @f0(%struct.st0*) nounwind
-define void @f0Test(%struct.st0* byval %s0) nounwind {
+define void @f0Test(%struct.st0* byval(%struct.st0) %s0) nounwind {
entry:
call void @f0(%struct.st0* %s0) nounwind
ret void
; CHECK: retsp 13
%struct.st1 = type { [10 x i32] }
declare void @f1(%struct.st1*) nounwind
-define i32 @f1Test(i32 %i, %struct.st1* byval %s1) nounwind {
+define i32 @f1Test(i32 %i, %struct.st1* byval(%struct.st1) %s1) nounwind {
entry:
call void @f1(%struct.st1* %s1) nounwind
ret i32 %i
; CHECK: retsp 0
%struct.st2 = type { i32 }
declare void @f2(i32, %struct.st2*) nounwind
-define void @f2Test(%struct.st2* byval %s2, i32 %i, ...) nounwind {
+define void @f2Test(%struct.st2* byval(%struct.st2) %s2, i32 %i, ...) nounwind {
entry:
call void @f2(i32 %i, %struct.st2* %s2)
ret void
; CHECK: bl f
; CHECK: retsp 2
declare void @f3(i8*) nounwind
-define void @f3Test(i8* byval %v) nounwind {
+define void @f3Test(i8* byval(i8) %v) nounwind {
entry:
call void @f3(i8* %v) nounwind
ret void
; RUN: llc -O0 %s -o /dev/null
-define void @CGRectStandardize(i32* sret %agg.result, i32* byval %rect) nounwind ssp !dbg !0 {
+define void @CGRectStandardize(i32* sret %agg.result, i32* byval(i32) %rect) nounwind ssp !dbg !0 {
entry:
call void @llvm.dbg.declare(metadata i32* %rect, metadata !23, metadata !DIExpression()), !dbg !24
ret void
@llvm.used = appending global [5 x i8*] [i8* getelementptr inbounds ([7 x i8], [7 x i8]* @"\01L_OBJC_CLASS_NAME_", i32 0, i32 0), i8* getelementptr inbounds ([32 x i8], [32 x i8]* @"\01L_OBJC_METH_VAR_NAME_", i32 0, i32 0), i8* getelementptr inbounds ([23 x i8], [23 x i8]* @"\01L_OBJC_METH_VAR_TYPE_", i32 0, i32 0), i8* bitcast ({ i32, i32, [1 x %struct._objc_method] }* @"\01l_OBJC_$_INSTANCE_METHODS_Bitmap" to i8*), i8* bitcast ([1 x i8*]* @"\01L_OBJC_LABEL_CLASS_$" to i8*)], section "llvm.metadata"
; Function Attrs: ssp uwtable
-define internal i8* @"\01-[Bitmap initWithCopy:andInfo:andLength:]"(%0* %self, i8* %_cmd, %0* %otherBitmap, %struct.ImageInfo* byval align 8 %info, i64 %length) #0 !dbg !7 {
+define internal i8* @"\01-[Bitmap initWithCopy:andInfo:andLength:]"(%0* %self, i8* %_cmd, %0* %otherBitmap, %struct.ImageInfo* byval(%struct.ImageInfo) align 8 %info, i64 %length) #0 !dbg !7 {
entry:
%retval = alloca i8*, align 8
%self.addr = alloca %0*, align 8
%struct.Pt = type { double, double }
%struct.Rect = type { %struct.Pt, %struct.Pt }
-define double @foo(%struct.Rect* byval %my_r0) nounwind ssp !dbg !1 {
+define double @foo(%struct.Rect* byval(%struct.Rect) %my_r0) nounwind ssp !dbg !1 {
entry:
%retval = alloca double ; <double*> [#uses=2]
%0 = alloca double ; <double*> [#uses=2]
declare void @llvm.dbg.declare(metadata, metadata, metadata)
-define void @f(i32* byval %p, i1 %c) !dbg !5 {
+define void @f(i32* byval(i32) %p, i1 %c) !dbg !5 {
br i1 %c, label %x, label %y
x:
%struct.Inner = type { i32, i64 }
; Function Attrs: nounwind ssp uwtable
-define i32 @foo(%struct.Outer* byval align 8 %outer) #0 !dbg !4 {
+define i32 @foo(%struct.Outer* byval(%struct.Outer) align 8 %outer) #0 !dbg !4 {
entry:
call void @llvm.dbg.declare(metadata %struct.Outer* %outer, metadata !25, metadata !DIExpression()), !dbg !26
%i1.sroa.0.0..sroa_idx = getelementptr inbounds %struct.Outer, %struct.Outer* %outer, i64 0, i32 0, i64 1, i32 0, !dbg !27
@__safestack_unsafe_stack_ptr = external thread_local(initialexec) global i8*
; Function Attrs: norecurse nounwind readonly safestack uwtable
-define i32 @_Z1f1Sm(%struct.S* byval nocapture readonly align 8 %zzz, i64 %len) #0 !dbg !12 {
+define i32 @_Z1f1Sm(%struct.S* byval(%struct.S) nocapture readonly align 8 %zzz, i64 %len) #0 !dbg !12 {
entry:
%unsafe_stack_ptr = load i8*, i8** @__safestack_unsafe_stack_ptr, !dbg !22
%unsafe_stack_static_top = getelementptr i8, i8* %unsafe_stack_ptr, i32 -400, !dbg !22
%struct.Inner = type { i32, i64 }
; Function Attrs: nounwind ssp uwtable
-define i32 @foo(%struct.Outer* byval align 8 %outer) #0 !dbg !4 {
+define i32 @foo(%struct.Outer* byval(%struct.Outer) align 8 %outer) #0 !dbg !4 {
entry:
%i1 = alloca %struct.Inner, align 8
call void @llvm.dbg.declare(metadata %struct.Outer* %outer, metadata !25, metadata !2), !dbg !26
%4 = bitcast %struct.r* %agg.tmp to i8*, !dbg !33
%5 = bitcast %struct.r* %r to i8*, !dbg !33
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %4, i8* align 8 %5, i64 40, i1 false), !dbg !33
- %call4 = call i32 @_Z7call_me1r(%struct.r* byval align 8 %agg.tmp), !dbg !33
+ %call4 = call i32 @_Z7call_me1r(%struct.r* byval(%struct.r) align 8 %agg.tmp), !dbg !33
store i32 %call4, i32* %retval, !dbg !33
br label %return, !dbg !33
; Function Attrs: nounwind
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #3
-declare i32 @_Z7call_me1r(%struct.r* byval align 8)
+declare i32 @_Z7call_me1r(%struct.r* byval(%struct.r) align 8)
attributes #0 = { nounwind }
attributes #2 = { nounwind readnone }
; Test behavior for named argument with explicit alignment. The memcpy and
; alloca alignments should match the explicit alignment of 64.
-define void @foo(%struct.A* byval align 64 %a) sanitize_address {
+define void @foo(%struct.A* byval(%struct.A) align 64 %a) sanitize_address {
entry:
; CHECK-LABEL: foo
; CHECK: call i64 @__asan_stack_malloc
; minimum alignment of 4 bytes since struct.A contains i32s which have 4-byte
; alignment. However, the alloca alignment will be 32 since that is the value
; passed via the -asan-realign-stack option, which is greater than 4.
-define void @baz(%struct.A* byval) sanitize_address {
+define void @baz(%struct.A* byval(%struct.A)) sanitize_address {
entry:
; CHECK-LABEL: baz
; CHECK: call i64 @__asan_stack_malloc
}
; CHECK: @f11
-define void @f11(i128* byval %x) nounwind {
+define void @f11(i128* byval(i128) %x) nounwind {
%1 = bitcast i128* %x to i8*
%2 = getelementptr inbounds i8, i8* %1, i64 16
; CHECK: br label
}
; CHECK: @f11_as1
-define void @f11_as1(i128 addrspace(1)* byval %x) nounwind {
+define void @f11_as1(i128 addrspace(1)* byval(i128) %x) nounwind {
%1 = bitcast i128 addrspace(1)* %x to i8 addrspace(1)*
%2 = getelementptr inbounds i8, i8 addrspace(1)* %1, i16 16
; CHECK: br label
; Check 8-aligned byval.
define i32 @bar6([2 x i64]* %arg) {
- %1 = call i32 (i32, ...) @foo(i32 0, [2 x i64]* byval align 8 %arg)
+ %1 = call i32 (i32, ...) @foo(i32 0, [2 x i64]* byval([2 x i64]) align 8 %arg)
ret i32 %1
}
; Check 16-aligned byval.
define i32 @bar7([4 x i64]* %arg) {
- %1 = call i32 (i32, ...) @foo(i32 0, [4 x i64]* byval align 16 %arg)
+ %1 = call i32 (i32, ...) @foo(i32 0, [4 x i64]* byval([4 x i64]) align 16 %arg)
ret i32 %1
}
; Check 8-aligned byval.
define i32 @bar6([2 x i64]* %arg) {
- %1 = call i32 (i32, ...) @foo(i32 0, [2 x i64]* byval align 8 %arg)
+ %1 = call i32 (i32, ...) @foo(i32 0, [2 x i64]* byval([2 x i64]) align 8 %arg)
ret i32 %1
}
; Check 16-aligned byval.
define i32 @bar7([4 x i64]* %arg) {
- %1 = call i32 (i32, ...) @foo(i32 0, [4 x i64]* byval align 16 %arg)
+ %1 = call i32 (i32, ...) @foo(i32 0, [4 x i64]* byval([4 x i64]) align 16 %arg)
ret i32 %1
}
define void @Caller() sanitize_memory {
entry:
%agg.tmp = alloca %struct.S, align 16
- call void @Callee(i32 1, %struct.S* byval align 16 %agg.tmp)
+ call void @Callee(i32 1, %struct.S* byval(%struct.S) align 16 %agg.tmp)
ret void
}
-declare void @Callee(i32, %struct.S* byval align 16)
+declare void @Callee(i32, %struct.S* byval(%struct.S) align 16)
; Test byval argument shadow alignment
-define <2 x i64> @ByValArgumentShadowLargeAlignment(<2 x i64>* byval %p) sanitize_memory {
+define <2 x i64> @ByValArgumentShadowLargeAlignment(<2 x i64>* byval(<2 x i64>) %p) sanitize_memory {
entry:
%x = load <2 x i64>, <2 x i64>* %p
ret <2 x i64> %x
; CHECK: ret <2 x i64>
-define i16 @ByValArgumentShadowSmallAlignment(i16* byval %p) sanitize_memory {
+define i16 @ByValArgumentShadowSmallAlignment(i16* byval(i16) %p) sanitize_memory {
entry:
%x = load i16, i16* %p
ret i16 %x
; CHECK-ORIGINS: %[[ORIGIN:.*]] = load
; CHECK: call void @__msan_warning_with_origin_noreturn(i32
; CHECK-ORIGINS-SAME: %[[ORIGIN]])
-; CHECK-CONT:
+; CHECK-CONT:
; CHECK-NEXT: unreachable
; CHECK: br i1 %tobool
; CHECK: ret void
%agg.tmp.sroa.2.0.copyload = load i64, i64* %agg.tmp.sroa.2.0..sroa_cast, align 4
%1 = bitcast %struct.StructByVal* %agg.tmp2 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 %0, i64 16, i1 false)
- call void (i32, ...) @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, %struct.StructByVal* byval align 8 %agg.tmp2)
+ call void (i32, ...) @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, %struct.StructByVal* byval(%struct.StructByVal) align 8 %agg.tmp2)
ret void
}
%agg.tmp.sroa.2.0.copyload = load i64, i64* %agg.tmp.sroa.2.0..sroa_cast, align 4
%1 = bitcast %struct.StructByVal* %agg.tmp2 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 %0, i64 16, i1 false)
- call void (i32, ...) @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, %struct.StructByVal* byval align 8 %agg.tmp2)
+ call void (i32, ...) @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, %struct.StructByVal* byval(%struct.StructByVal) align 8 %agg.tmp2)
ret void
}
; RUN: llvm-link %s %p/func-attrs-b.ll -S -o - | FileCheck %s
; PR2382
-; CHECK: call void @check0(%struct.S0* sret null, %struct.S0* byval align 4 null, %struct.S0* align 4 null, %struct.S0* byval align 4 null)
-; CHECK: define void @check0(%struct.S0* sret %agg.result, %struct.S0* byval %arg0, %struct.S0* %arg1, %struct.S0* byval %arg2)
+; CHECK: call void @check0(%struct.S0* sret null, %struct.S0* byval(%struct.S0) align 4 null, %struct.S0* align 4 null, %struct.S0* byval(%struct.S0) align 4 null)
+; CHECK: define void @check0(%struct.S0* sret %agg.result, %struct.S0* byval(%struct.S0) %arg0, %struct.S0* %arg1, %struct.S0* byval(%struct.S0) %arg2)
%struct.S0 = type <{ i8, i8, i8, i8 }>
define void @a() {
- call void @check0(%struct.S0* sret null, %struct.S0* byval align 4 null, %struct.S0* align 4 null, %struct.S0* byval align 4 null)
+ call void @check0(%struct.S0* sret null, %struct.S0* byval(%struct.S0) align 4 null, %struct.S0* align 4 null, %struct.S0* byval(%struct.S0) align 4 null)
ret void
}
%struct.S0 = type <{ i8, i8, i8, i8 }>
-define void @check0(%struct.S0* sret %agg.result, %struct.S0* byval %arg0, %struct.S0* %arg1, %struct.S0* byval %arg2) {
+define void @check0(%struct.S0* sret %agg.result, %struct.S0* byval(%struct.S0) %arg0, %struct.S0* %arg1, %struct.S0* byval(%struct.S0) %arg2) {
ret void
}
%struct.ss = type { i32, i64 }
; Don't drop 'byval' on %X here.
-define internal void @f(%struct.ss* byval %b, i32* byval %X, i32 %i) nounwind {
+define internal void @f(%struct.ss* byval(%struct.ss) %b, i32* byval(i32) %X, i32 %i) nounwind {
; CHECK-LABEL: define {{[^@]+}}@f
-; CHECK-SAME: (i32 [[B_0:%.*]], i64 [[B_1:%.*]], i32* byval [[X:%.*]], i32 [[I:%.*]])
+; CHECK-SAME: (i32 [[B_0:%.*]], i64 [[B_1:%.*]], i32* byval(i32) [[X:%.*]], i32 [[I:%.*]])
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B:%.*]] = alloca [[STRUCT_SS:%.*]], align 8
; CHECK-NEXT: [[DOT0:%.*]] = getelementptr [[STRUCT_SS]], %struct.ss* [[B]], i32 0, i32 0
; CHECK-NEXT: [[S_0_VAL:%.*]] = load i32, i32* [[S_0]], align 4
; CHECK-NEXT: [[S_1:%.*]] = getelementptr [[STRUCT_SS]], %struct.ss* [[S]], i32 0, i32 1
; CHECK-NEXT: [[S_1_VAL:%.*]] = load i64, i64* [[S_1]], align 4
-; CHECK-NEXT: call void @f(i32 [[S_0_VAL]], i64 [[S_1_VAL]], i32* byval [[X]], i32 zeroext 0)
+; CHECK-NEXT: call void @f(i32 [[S_0_VAL]], i64 [[S_1_VAL]], i32* byval(i32) [[X]], i32 zeroext 0)
; CHECK-NEXT: ret i32 0
;
entry:
%tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1
store i64 2, i64* %tmp4, align 4
- call void @f( %struct.ss* byval %S, i32* byval %X, i32 zeroext 0)
+ call void @f(%struct.ss* byval(%struct.ss) %S, i32* byval(i32) %X, i32 zeroext 0)
ret i32 0
}
%struct.ss = type { i32, i64 }
-define internal void @f(%struct.ss* byval %b, i32* byval %X) nounwind {
+define internal void @f(%struct.ss* byval(%struct.ss) %b, i32* byval(i32) %X) nounwind {
; CHECK-LABEL: define {{[^@]+}}@f
-; CHECK-SAME: (i32 [[B_0:%.*]], i64 [[B_1:%.*]], i32* byval [[X:%.*]])
+; CHECK-SAME: (i32 [[B_0:%.*]], i64 [[B_1:%.*]], i32* byval(i32) [[X:%.*]])
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B:%.*]] = alloca [[STRUCT_SS:%.*]], align 8
; CHECK-NEXT: [[DOT0:%.*]] = getelementptr [[STRUCT_SS]], %struct.ss* [[B]], i32 0, i32 0
; CHECK-NEXT: [[S_0_VAL:%.*]] = load i32, i32* [[S_0]], align 4
; CHECK-NEXT: [[S_1:%.*]] = getelementptr [[STRUCT_SS]], %struct.ss* [[S]], i32 0, i32 1
; CHECK-NEXT: [[S_1_VAL:%.*]] = load i64, i64* [[S_1]], align 4
-; CHECK-NEXT: call void @f(i32 [[S_0_VAL]], i64 [[S_1_VAL]], i32* byval [[X]])
+; CHECK-NEXT: call void @f(i32 [[S_0_VAL]], i64 [[S_1_VAL]], i32* byval(i32) [[X]])
; CHECK-NEXT: ret i32 0
;
entry:
store i32 1, i32* %tmp1, align 8
%tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1
store i64 2, i64* %tmp4, align 4
- call void @f( %struct.ss* byval %S, i32* byval %X)
+ call void @f(%struct.ss* byval(%struct.ss) %S, i32* byval(i32) %X)
ret i32 0
}
%struct.ss = type { i32, i64 }
-define internal void @f(%struct.ss* byval %b) nounwind {
+define internal void @f(%struct.ss* byval(%struct.ss) %b) nounwind {
; CHECK-LABEL: define {{[^@]+}}@f
; CHECK-SAME: (i32 [[B_0:%.*]], i64 [[B_1:%.*]])
; CHECK-NEXT: entry:
}
-define internal void @g(%struct.ss* byval align 32 %b) nounwind {
+define internal void @g(%struct.ss* byval(%struct.ss) align 32 %b) nounwind {
; CHECK-LABEL: define {{[^@]+}}@g
; CHECK-SAME: (i32 [[B_0:%.*]], i64 [[B_1:%.*]])
; CHECK-NEXT: entry:
store i32 1, i32* %tmp1, align 8
%tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1
store i64 2, i64* %tmp4, align 4
- call void @f(%struct.ss* byval %S) nounwind
- call void @g(%struct.ss* byval %S) nounwind
+ call void @f(%struct.ss* byval(%struct.ss) %S) nounwind
+ call void @g(%struct.ss* byval(%struct.ss) %S) nounwind
ret i32 0
}
%struct.pair = type { i32, i32 }
-define internal void @test_byval(%struct.pair* byval %P) {
+define internal void @test_byval(%struct.pair* byval(%struct.pair) %P) {
; CHECK-LABEL: define {{[^@]+}}@test_byval
; CHECK-SAME: (i32 [[P_0:%.*]], i32 [[P_1:%.*]])
; CHECK-NEXT: [[P:%.*]] = alloca [[STRUCT_PAIR:%.*]], align 8
define void @run() {
; CHECK-LABEL: define {{[^@]+}}@run()
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @UseLongDoubleUnsafely(%union.u* byval align 16 bitcast (%struct.s* @b to %union.u*))
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @UseLongDoubleUnsafely(%union.u* byval(%union.u) align 16 bitcast (%struct.s* @b to %union.u*))
; CHECK-NEXT: [[DOT0:%.*]] = getelementptr [[UNION_U:%.*]], %union.u* bitcast (%struct.s* @b to %union.u*), i32 0, i32 0
; CHECK-NEXT: [[DOT0_VAL:%.*]] = load x86_fp80, x86_fp80* [[DOT0]]
; CHECK-NEXT: [[TMP1:%.*]] = tail call x86_fp80 @UseLongDoubleSafely(x86_fp80 [[DOT0_VAL]])
; CHECK-NEXT: ret void
;
entry:
- tail call i8 @UseLongDoubleUnsafely(%union.u* byval align 16 bitcast (%struct.s* @b to %union.u*))
- tail call x86_fp80 @UseLongDoubleSafely(%union.u* byval align 16 bitcast (%struct.s* @b to %union.u*))
+ tail call i8 @UseLongDoubleUnsafely(%union.u* byval(%union.u) align 16 bitcast (%struct.s* @b to %union.u*))
+ tail call x86_fp80 @UseLongDoubleSafely(%union.u* byval(%union.u) align 16 bitcast (%struct.s* @b to %union.u*))
call i64 @AccessPaddingOfStruct(%struct.Foo* @a)
call i64 @CaptureAStruct(%struct.Foo* @a)
ret void
}
-define internal i8 @UseLongDoubleUnsafely(%union.u* byval align 16 %arg) {
+define internal i8 @UseLongDoubleUnsafely(%union.u* byval(%union.u) align 16 %arg) {
; CHECK-LABEL: define {{[^@]+}}@UseLongDoubleUnsafely
-; CHECK-SAME: (%union.u* byval align 16 [[ARG:%.*]])
+; CHECK-SAME: (%union.u* byval(%union.u) align 16 [[ARG:%.*]])
; CHECK-NEXT: entry:
; CHECK-NEXT: [[BITCAST:%.*]] = bitcast %union.u* [[ARG]] to %struct.s*
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.s* [[BITCAST]], i64 0, i32 2
ret i8 %result
}
-define internal x86_fp80 @UseLongDoubleSafely(%union.u* byval align 16 %arg) {
+define internal x86_fp80 @UseLongDoubleSafely(%union.u* byval(%union.u) align 16 %arg) {
; CHECK-LABEL: define {{[^@]+}}@UseLongDoubleSafely
; CHECK-SAME: (x86_fp80 [[ARG_0:%.*]])
; CHECK-NEXT: [[ARG:%.*]] = alloca [[UNION_U:%.*]], align 16
ret x86_fp80 %fp80
}
-define internal i64 @AccessPaddingOfStruct(%struct.Foo* byval %a) {
+define internal i64 @AccessPaddingOfStruct(%struct.Foo* byval(%struct.Foo) %a) {
; CHECK-LABEL: define {{[^@]+}}@AccessPaddingOfStruct
-; CHECK-SAME: (%struct.Foo* byval [[A:%.*]])
+; CHECK-SAME: (%struct.Foo* byval(%struct.Foo) [[A:%.*]])
; CHECK-NEXT: [[P:%.*]] = bitcast %struct.Foo* [[A]] to i64*
; CHECK-NEXT: [[V:%.*]] = load i64, i64* [[P]]
; CHECK-NEXT: ret i64 [[V]]
ret i64 %v
}
-define internal i64 @CaptureAStruct(%struct.Foo* byval %a) {
+define internal i64 @CaptureAStruct(%struct.Foo* byval(%struct.Foo) %a) {
; CHECK-LABEL: define {{[^@]+}}@CaptureAStruct
-; CHECK-SAME: (%struct.Foo* byval [[A:%.*]])
+; CHECK-SAME: (%struct.Foo* byval(%struct.Foo) [[A:%.*]])
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_PTR:%.*]] = alloca %struct.Foo*
; CHECK-NEXT: br label [[LOOP:%.*]]
declare i8* @foo(%pair*)
-define internal void @bar(%pair* byval %Data) {
+define internal void @bar(%pair* byval(%pair) %Data) {
; CHECK-LABEL: define {{[^@]+}}@bar
; CHECK-SAME: (i32 [[DATA_0:%.*]], i32 [[DATA_1:%.*]])
; CHECK-NEXT: [[DATA:%.*]] = alloca [[PAIR:%.*]], align 8
ret void
}
-define void @zed(%pair* byval %Data) {
+define void @zed(%pair* byval(%pair) %Data) {
; CHECK-LABEL: define {{[^@]+}}@zed
-; CHECK-SAME: (%pair* byval [[DATA:%.*]])
+; CHECK-SAME: (%pair* byval(%pair) [[DATA:%.*]])
; CHECK-NEXT: [[DATA_0:%.*]] = getelementptr [[PAIR:%.*]], %pair* [[DATA]], i32 0, i32 0
; CHECK-NEXT: [[DATA_0_VAL:%.*]] = load i32, i32* [[DATA_0]], align 4
; CHECK-NEXT: [[DATA_1:%.*]] = getelementptr [[PAIR]], %pair* [[DATA]], i32 0, i32 1
; CHECK-NEXT: call void @bar(i32 [[DATA_0_VAL]], i32 [[DATA_1_VAL]])
; CHECK-NEXT: ret void
;
- call void @bar(%pair* byval %Data)
+ call void @bar(%pair* byval(%pair) %Data)
ret void
}
; CHECK-LABEL: define {{[^@]+}}@main
; CHECK-SAME: (i32 [[ARGC:%.*]], i8** nocapture readnone [[ARGV:%.*]])
; CHECK-NEXT: entry:
-; CHECK-NEXT: tail call void (i8*, i8*, i8*, i8*, i8*, ...) @callee_t0f(i8* undef, i8* undef, i8* undef, i8* undef, i8* undef, %struct.tt0* byval align 8 @t45)
+; CHECK-NEXT: tail call void (i8*, i8*, i8*, i8*, i8*, ...) @callee_t0f(i8* undef, i8* undef, i8* undef, i8* undef, i8* undef, %struct.tt0* byval(%struct.tt0) align 8 @t45)
; CHECK-NEXT: ret i32 0
;
entry:
- tail call void (i8*, i8*, i8*, i8*, i8*, ...) @callee_t0f(i8* undef, i8* undef, i8* undef, i8* undef, i8* undef, %struct.tt0* byval align 8 @t45)
+ tail call void (i8*, i8*, i8*, i8*, i8*, ...) @callee_t0f(i8* undef, i8* undef, i8* undef, i8* undef, i8* undef, %struct.tt0* byval(%struct.tt0) align 8 @t45)
ret i32 0
}
%struct.ss = type { i32, i64 }
; Don't drop 'byval' on %X here.
-define internal i32 @f(%struct.ss* byval %b, i32* byval %X, i32 %i) nounwind {
+define internal i32 @f(%struct.ss* byval(%struct.ss) %b, i32* byval(i32) %X, i32 %i) nounwind {
;
; IS__TUNIT_OPM: Function Attrs: nofree nosync nounwind readnone willreturn
; IS__TUNIT_OPM-LABEL: define {{[^@]+}}@f
-; IS__TUNIT_OPM-SAME: (%struct.ss* noalias nocapture nofree noundef nonnull byval align 8 dereferenceable(12) [[B:%.*]], i32* noalias nocapture nofree nonnull byval align 4 dereferenceable(4) [[X:%.*]], i32 noundef [[I:%.*]]) [[ATTR0:#.*]] {
+; IS__TUNIT_OPM-SAME: (%struct.ss* noalias nocapture nofree noundef nonnull byval(%struct.ss) align 8 dereferenceable(12) [[B:%.*]], i32* noalias nocapture nofree nonnull byval(i32) align 4 dereferenceable(4) [[X:%.*]], i32 noundef [[I:%.*]]) [[ATTR0:#.*]] {
; IS__TUNIT_OPM-NEXT: entry:
; IS__TUNIT_OPM-NEXT: [[TMP:%.*]] = getelementptr [[STRUCT_SS:%.*]], %struct.ss* [[B]], i32 0, i32 0
; IS__TUNIT_OPM-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP]], align 8
;
; IS__CGSCC_OPM: Function Attrs: nofree norecurse nosync nounwind readnone willreturn
; IS__CGSCC_OPM-LABEL: define {{[^@]+}}@f
-; IS__CGSCC_OPM-SAME: (%struct.ss* noalias nocapture nofree noundef nonnull byval align 8 dereferenceable(12) [[B:%.*]], i32* noalias nocapture nofree nonnull byval align 4 dereferenceable(4) [[X:%.*]]) [[ATTR0:#.*]] {
+; IS__CGSCC_OPM-SAME: (%struct.ss* noalias nocapture nofree noundef nonnull byval(%struct.ss) align 8 dereferenceable(12) [[B:%.*]], i32* noalias nocapture nofree nonnull byval(i32) align 4 dereferenceable(4) [[X:%.*]]) [[ATTR0:#.*]] {
; IS__CGSCC_OPM-NEXT: entry:
; IS__CGSCC_OPM-NEXT: [[TMP:%.*]] = getelementptr [[STRUCT_SS:%.*]], %struct.ss* [[B]], i32 0, i32 0
; IS__CGSCC_OPM-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP]], align 8
; IS__TUNIT_OPM-NEXT: store i32 1, i32* [[TMP1]], align 8
; IS__TUNIT_OPM-NEXT: [[TMP4:%.*]] = getelementptr [[STRUCT_SS]], %struct.ss* [[S]], i32 0, i32 1
; IS__TUNIT_OPM-NEXT: store i64 2, i64* [[TMP4]], align 4
-; IS__TUNIT_OPM-NEXT: [[C:%.*]] = call i32 @f(%struct.ss* noalias nocapture nofree noundef nonnull readonly byval align 8 dereferenceable(12) [[S]], i32* nocapture nofree readonly byval align 4 [[X]], i32 noundef zeroext 0) [[ATTR0]]
+; IS__TUNIT_OPM-NEXT: [[C:%.*]] = call i32 @f(%struct.ss* noalias nocapture nofree noundef nonnull readonly byval(%struct.ss) align 8 dereferenceable(12) [[S]], i32* nocapture nofree readonly byval(i32) align 4 [[X]], i32 noundef zeroext 0) [[ATTR0]]
; IS__TUNIT_OPM-NEXT: ret i32 [[C]]
;
; IS__TUNIT_NPM: Function Attrs: nofree nosync nounwind readnone willreturn
; IS__CGSCC_OPM-NEXT: store i32 1, i32* [[TMP1]], align 8
; IS__CGSCC_OPM-NEXT: [[TMP4:%.*]] = getelementptr [[STRUCT_SS]], %struct.ss* [[S]], i32 0, i32 1
; IS__CGSCC_OPM-NEXT: store i64 2, i64* [[TMP4]], align 4
-; IS__CGSCC_OPM-NEXT: [[C:%.*]] = call i32 @f(%struct.ss* noalias nocapture nofree noundef nonnull readnone byval align 8 dereferenceable(12) [[S]], i32* noalias nocapture nofree nonnull readnone byval align 4 dereferenceable(4) [[X]]) [[ATTR1:#.*]]
+; IS__CGSCC_OPM-NEXT: [[C:%.*]] = call i32 @f(%struct.ss* noalias nocapture nofree noundef nonnull readnone byval(%struct.ss) align 8 dereferenceable(12) [[S]], i32* noalias nocapture nofree nonnull readnone byval(i32) align 4 dereferenceable(4) [[X]]) [[ATTR1:#.*]]
; IS__CGSCC_OPM-NEXT: ret i32 [[C]]
;
; IS__CGSCC_NPM: Function Attrs: argmemonly nofree norecurse nosync nounwind willreturn
%tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1
store i64 2, i64* %tmp4, align 4
- %c = call i32 @f( %struct.ss* byval %S, i32* byval %X, i32 zeroext 0)
+ %c = call i32 @f(%struct.ss* byval(%struct.ss) %S, i32* byval(i32) %X, i32 zeroext 0)
ret i32 %c
}
%struct.ss = type { i32, i64 }
-define internal void @f(%struct.ss* byval %b, i32* byval %X) nounwind {
+define internal void @f(%struct.ss* byval(%struct.ss) %b, i32* byval(i32) %X) nounwind {
; IS__CGSCC_OPM: Function Attrs: nofree norecurse nosync nounwind readnone willreturn
; IS__CGSCC_OPM-LABEL: define {{[^@]+}}@f
-; IS__CGSCC_OPM-SAME: (%struct.ss* noalias nocapture nofree noundef nonnull byval align 8 dereferenceable(12) [[B:%.*]], i32* noalias nocapture nofree nonnull writeonly byval align 4 dereferenceable(4) [[X:%.*]]) [[ATTR0:#.*]] {
+; IS__CGSCC_OPM-SAME: (%struct.ss* noalias nocapture nofree noundef nonnull byval(%struct.ss) align 8 dereferenceable(12) [[B:%.*]], i32* noalias nocapture nofree nonnull writeonly byval(i32) align 4 dereferenceable(4) [[X:%.*]]) [[ATTR0:#.*]] {
; IS__CGSCC_OPM-NEXT: entry:
; IS__CGSCC_OPM-NEXT: [[TMP:%.*]] = getelementptr [[STRUCT_SS:%.*]], %struct.ss* [[B]], i32 0, i32 0
; IS__CGSCC_OPM-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP]], align 8
store i32 1, i32* %tmp1, align 8
%tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1
store i64 2, i64* %tmp4, align 4
- call void @f( %struct.ss* byval %S, i32* byval %X)
+ call void @f(%struct.ss* byval(%struct.ss) %S, i32* byval(i32) %X)
ret i32 0
}
%struct.ss = type { i32, i64 }
-define internal i32 @f(%struct.ss* byval %b) nounwind {
+define internal i32 @f(%struct.ss* byval(%struct.ss) %b) nounwind {
; IS__TUNIT_OPM: Function Attrs: nofree nosync nounwind readnone willreturn
; IS__TUNIT_OPM-LABEL: define {{[^@]+}}@f
-; IS__TUNIT_OPM-SAME: (%struct.ss* noalias nocapture nofree noundef nonnull byval align 8 dereferenceable(12) [[B:%.*]]) [[ATTR0:#.*]] {
+; IS__TUNIT_OPM-SAME: (%struct.ss* noalias nocapture nofree noundef nonnull byval(%struct.ss) align 8 dereferenceable(12) [[B:%.*]]) [[ATTR0:#.*]] {
; IS__TUNIT_OPM-NEXT: entry:
; IS__TUNIT_OPM-NEXT: [[TMP:%.*]] = getelementptr [[STRUCT_SS:%.*]], %struct.ss* [[B]], i32 0, i32 0
; IS__TUNIT_OPM-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP]], align 8
;
; IS__CGSCC_OPM: Function Attrs: nofree norecurse nosync nounwind readnone willreturn
; IS__CGSCC_OPM-LABEL: define {{[^@]+}}@f
-; IS__CGSCC_OPM-SAME: (%struct.ss* noalias nocapture nofree noundef nonnull byval align 32 dereferenceable(12) [[B:%.*]]) [[ATTR0:#.*]] {
+; IS__CGSCC_OPM-SAME: (%struct.ss* noalias nocapture nofree noundef nonnull byval(%struct.ss) align 32 dereferenceable(12) [[B:%.*]]) [[ATTR0:#.*]] {
; IS__CGSCC_OPM-NEXT: entry:
; IS__CGSCC_OPM-NEXT: [[TMP:%.*]] = getelementptr [[STRUCT_SS:%.*]], %struct.ss* [[B]], i32 0, i32 0
; IS__CGSCC_OPM-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP]], align 32
}
-define internal i32 @g(%struct.ss* byval align 32 %b) nounwind {
+define internal i32 @g(%struct.ss* byval(%struct.ss) align 32 %b) nounwind {
; IS__TUNIT_OPM: Function Attrs: nofree nosync nounwind readnone willreturn
; IS__TUNIT_OPM-LABEL: define {{[^@]+}}@g
-; IS__TUNIT_OPM-SAME: (%struct.ss* noalias nocapture nofree noundef nonnull byval align 32 dereferenceable(12) [[B:%.*]]) [[ATTR0]] {
+; IS__TUNIT_OPM-SAME: (%struct.ss* noalias nocapture nofree noundef nonnull byval(%struct.ss) align 32 dereferenceable(12) [[B:%.*]]) [[ATTR0]] {
; IS__TUNIT_OPM-NEXT: entry:
; IS__TUNIT_OPM-NEXT: [[TMP:%.*]] = getelementptr [[STRUCT_SS:%.*]], %struct.ss* [[B]], i32 0, i32 0
; IS__TUNIT_OPM-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP]], align 32
;
; IS__CGSCC_OPM: Function Attrs: nofree norecurse nosync nounwind readnone willreturn
; IS__CGSCC_OPM-LABEL: define {{[^@]+}}@g
-; IS__CGSCC_OPM-SAME: (%struct.ss* noalias nocapture nofree noundef nonnull byval align 32 dereferenceable(12) [[B:%.*]]) [[ATTR0]] {
+; IS__CGSCC_OPM-SAME: (%struct.ss* noalias nocapture nofree noundef nonnull byval(%struct.ss) align 32 dereferenceable(12) [[B:%.*]]) [[ATTR0]] {
; IS__CGSCC_OPM-NEXT: entry:
; IS__CGSCC_OPM-NEXT: [[TMP:%.*]] = getelementptr [[STRUCT_SS:%.*]], %struct.ss* [[B]], i32 0, i32 0
; IS__CGSCC_OPM-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP]], align 32
; IS__TUNIT_OPM-NEXT: store i32 1, i32* [[TMP1]], align 8
; IS__TUNIT_OPM-NEXT: [[TMP4:%.*]] = getelementptr [[STRUCT_SS]], %struct.ss* [[S]], i32 0, i32 1
; IS__TUNIT_OPM-NEXT: store i64 2, i64* [[TMP4]], align 4
-; IS__TUNIT_OPM-NEXT: [[C0:%.*]] = call i32 @f(%struct.ss* noalias nocapture nofree noundef nonnull readonly byval align 8 dereferenceable(12) [[S]]) [[ATTR0]]
-; IS__TUNIT_OPM-NEXT: [[C1:%.*]] = call i32 @g(%struct.ss* noalias nocapture nofree noundef nonnull readonly byval align 32 dereferenceable(12) [[S]]) [[ATTR0]]
+; IS__TUNIT_OPM-NEXT: [[C0:%.*]] = call i32 @f(%struct.ss* noalias nocapture nofree noundef nonnull readonly byval(%struct.ss) align 8 dereferenceable(12) [[S]]) [[ATTR0]]
+; IS__TUNIT_OPM-NEXT: [[C1:%.*]] = call i32 @g(%struct.ss* noalias nocapture nofree noundef nonnull readonly byval(%struct.ss) align 32 dereferenceable(12) [[S]]) [[ATTR0]]
; IS__TUNIT_OPM-NEXT: [[A:%.*]] = add i32 [[C0]], [[C1]]
; IS__TUNIT_OPM-NEXT: ret i32 [[A]]
;
; IS__CGSCC_OPM-NEXT: store i32 1, i32* [[TMP1]], align 32
; IS__CGSCC_OPM-NEXT: [[TMP4:%.*]] = getelementptr [[STRUCT_SS]], %struct.ss* [[S]], i32 0, i32 1
; IS__CGSCC_OPM-NEXT: store i64 2, i64* [[TMP4]], align 4
-; IS__CGSCC_OPM-NEXT: [[C0:%.*]] = call i32 @f(%struct.ss* noalias nocapture nofree noundef nonnull readnone byval align 32 dereferenceable(12) [[S]]) [[ATTR1:#.*]]
-; IS__CGSCC_OPM-NEXT: [[C1:%.*]] = call i32 @g(%struct.ss* noalias nocapture nofree noundef nonnull readnone byval align 32 dereferenceable(12) [[S]]) [[ATTR1]]
+; IS__CGSCC_OPM-NEXT: [[C0:%.*]] = call i32 @f(%struct.ss* noalias nocapture nofree noundef nonnull readnone byval(%struct.ss) align 32 dereferenceable(12) [[S]]) [[ATTR1:#.*]]
+; IS__CGSCC_OPM-NEXT: [[C1:%.*]] = call i32 @g(%struct.ss* noalias nocapture nofree noundef nonnull readnone byval(%struct.ss) align 32 dereferenceable(12) [[S]]) [[ATTR1]]
; IS__CGSCC_OPM-NEXT: [[A:%.*]] = add i32 [[C0]], [[C1]]
; IS__CGSCC_OPM-NEXT: ret i32 [[A]]
;
store i32 1, i32* %tmp1, align 8
%tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1
store i64 2, i64* %tmp4, align 4
- %c0 = call i32 @f(%struct.ss* byval %S) nounwind
- %c1 = call i32 @g(%struct.ss* byval %S) nounwind
+ %c0 = call i32 @f(%struct.ss* byval(%struct.ss) %S) nounwind
+ %c1 = call i32 @g(%struct.ss* byval(%struct.ss) %S) nounwind
%a = add i32 %c0, %c1
ret i32 %a
}
%struct.pair = type { i32, i32 }
-define internal void @test_byval(%struct.pair* byval %P) {
+define internal void @test_byval(%struct.pair* byval(%struct.pair) %P) {
; CHECK-LABEL: define {{[^@]+}}@test_byval() {
; CHECK-NEXT: call void @sink(i32 noundef 0)
; CHECK-NEXT: ret void
; IS__CGSCC____-NEXT: unreachable
;
entry:
- tail call i8 @UseLongDoubleUnsafely(%union.u* byval align 16 bitcast (%struct.s* @b to %union.u*))
- tail call x86_fp80 @UseLongDoubleSafely(%union.u* byval align 16 bitcast (%struct.s* @b to %union.u*))
+ tail call i8 @UseLongDoubleUnsafely(%union.u* byval(%union.u) align 16 bitcast (%struct.s* @b to %union.u*))
+ tail call x86_fp80 @UseLongDoubleSafely(%union.u* byval(%union.u) align 16 bitcast (%struct.s* @b to %union.u*))
call i64 @AccessPaddingOfStruct(%struct.Foo* @a)
call i64 @CaptureAStruct(%struct.Foo* @a)
ret void
}
-define internal i8 @UseLongDoubleUnsafely(%union.u* byval align 16 %arg) {
+define internal i8 @UseLongDoubleUnsafely(%union.u* byval(%union.u) align 16 %arg) {
; IS__CGSCC____: Function Attrs: nofree norecurse nosync nounwind readnone willreturn
; IS__CGSCC____-LABEL: define {{[^@]+}}@UseLongDoubleUnsafely
; IS__CGSCC____-SAME: () [[ATTR1:#.*]] {
ret i8 %result
}
-define internal x86_fp80 @UseLongDoubleSafely(%union.u* byval align 16 %arg) {
+define internal x86_fp80 @UseLongDoubleSafely(%union.u* byval(%union.u) align 16 %arg) {
; IS__CGSCC____: Function Attrs: nofree norecurse nosync nounwind readnone willreturn
; IS__CGSCC____-LABEL: define {{[^@]+}}@UseLongDoubleSafely
; IS__CGSCC____-SAME: () [[ATTR1]] {
ret x86_fp80 %fp80
}
-define internal i64 @AccessPaddingOfStruct(%struct.Foo* byval %a) {
+define internal i64 @AccessPaddingOfStruct(%struct.Foo* byval(%struct.Foo) %a) {
; IS__CGSCC____: Function Attrs: nofree norecurse nosync nounwind readnone willreturn
; IS__CGSCC____-LABEL: define {{[^@]+}}@AccessPaddingOfStruct
; IS__CGSCC____-SAME: () [[ATTR1]] {
ret i64 %v
}
-define internal i64 @CaptureAStruct(%struct.Foo* byval %a) {
+define internal i64 @CaptureAStruct(%struct.Foo* byval(%struct.Foo) %a) {
; IS__CGSCC_OPM-LABEL: define {{[^@]+}}@CaptureAStruct
-; IS__CGSCC_OPM-SAME: (%struct.Foo* noalias nofree byval [[A:%.*]])
+; IS__CGSCC_OPM-SAME: (%struct.Foo* noalias nofree byval(%struct.Foo) [[A:%.*]])
; IS__CGSCC_OPM-NEXT: entry:
; IS__CGSCC_OPM-NEXT: [[A_PTR:%.*]] = alloca %struct.Foo*
; IS__CGSCC_OPM-NEXT: br label [[LOOP:%.*]]
declare i8* @foo(%pair*)
-define internal void @bar(%pair* byval %Data) {
+define internal void @bar(%pair* byval(%pair) %Data) {
; IS________OPM-LABEL: define {{[^@]+}}@bar
-; IS________OPM-SAME: (%pair* noalias nonnull byval dereferenceable(8) [[DATA:%.*]]) {
+; IS________OPM-SAME: (%pair* noalias nonnull byval(%pair) dereferenceable(8) [[DATA:%.*]]) {
; IS________OPM-NEXT: [[TMP1:%.*]] = tail call i8* @foo(%pair* nonnull dereferenceable(8) [[DATA]])
; IS________OPM-NEXT: ret void
;
ret void
}
-define void @zed(%pair* byval %Data) {
+define void @zed(%pair* byval(%pair) %Data) {
; IS________OPM-LABEL: define {{[^@]+}}@zed
-; IS________OPM-SAME: (%pair* noalias nocapture nonnull readonly byval dereferenceable(8) [[DATA:%.*]]) {
-; IS________OPM-NEXT: call void @bar(%pair* noalias nocapture nonnull readonly byval dereferenceable(8) [[DATA]])
+; IS________OPM-SAME: (%pair* noalias nocapture nonnull readonly byval(%pair) dereferenceable(8) [[DATA:%.*]]) {
+; IS________OPM-NEXT: call void @bar(%pair* noalias nocapture nonnull readonly byval(%pair) dereferenceable(8) [[DATA]])
; IS________OPM-NEXT: ret void
;
; IS________NPM-LABEL: define {{[^@]+}}@zed
-; IS________NPM-SAME: (%pair* noalias nocapture nonnull readonly byval dereferenceable(8) [[DATA:%.*]]) {
+; IS________NPM-SAME: (%pair* noalias nocapture nonnull readonly byval(%pair) dereferenceable(8) [[DATA:%.*]]) {
; IS________NPM-NEXT: [[DATA_CAST:%.*]] = bitcast %pair* [[DATA]] to i32*
; IS________NPM-NEXT: [[TMP1:%.*]] = load i32, i32* [[DATA_CAST]], align 1
; IS________NPM-NEXT: [[DATA_0_1:%.*]] = getelementptr [[PAIR:%.*]], %pair* [[DATA]], i32 0, i32 1
; IS________NPM-NEXT: call void @bar(i32 [[TMP1]], i32 [[TMP2]])
; IS________NPM-NEXT: ret void
;
- call void @bar(%pair* byval %Data)
+ call void @bar(%pair* byval(%pair) %Data)
ret void
}
; CHECK-LABEL: define {{[^@]+}}@main
; CHECK-SAME: (i32 [[ARGC:%.*]], i8** nocapture nofree readnone [[ARGV:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: tail call void (i8*, i8*, i8*, i8*, i8*, ...) @callee_t0f(i8* undef, i8* undef, i8* undef, i8* undef, i8* undef, %struct.tt0* noundef nonnull byval align 8 dereferenceable(16) @t45)
+; CHECK-NEXT: tail call void (i8*, i8*, i8*, i8*, i8*, ...) @callee_t0f(i8* undef, i8* undef, i8* undef, i8* undef, i8* undef, %struct.tt0* noundef nonnull byval(%struct.tt0) align 8 dereferenceable(16) @t45)
; CHECK-NEXT: ret i32 0
;
entry:
- tail call void (i8*, i8*, i8*, i8*, i8*, ...) @callee_t0f(i8* undef, i8* undef, i8* undef, i8* undef, i8* undef, %struct.tt0* byval align 8 @t45)
+ tail call void (i8*, i8*, i8*, i8*, i8*, ...) @callee_t0f(i8* undef, i8* undef, i8* undef, i8* undef, i8* undef, %struct.tt0* byval(%struct.tt0) align 8 @t45)
ret i32 0
}
; PR5038
%struct.MYstr = type { i8, i32 }
@mystr = internal global %struct.MYstr zeroinitializer ; <%struct.MYstr*> [#uses=3]
-define internal void @vfu1(%struct.MYstr* byval align 4 %u) nounwind {
+define internal void @vfu1(%struct.MYstr* byval(%struct.MYstr) align 4 %u) nounwind {
; IS__CGSCC_OPM: Function Attrs: nofree norecurse nosync nounwind readnone willreturn
; IS__CGSCC_OPM-LABEL: define {{[^@]+}}@vfu1
-; IS__CGSCC_OPM-SAME: (%struct.MYstr* noalias nocapture nofree noundef nonnull writeonly byval align 8 dereferenceable(8) [[U:%.*]]) [[ATTR0:#.*]] {
+; IS__CGSCC_OPM-SAME: (%struct.MYstr* noalias nocapture nofree noundef nonnull writeonly byval(%struct.MYstr) align 8 dereferenceable(8) [[U:%.*]]) [[ATTR0:#.*]] {
; IS__CGSCC_OPM-NEXT: entry:
; IS__CGSCC_OPM-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_MYSTR:%.*]], %struct.MYstr* [[U]], i32 0, i32 1
; IS__CGSCC_OPM-NEXT: store i32 99, i32* [[TMP0]], align 4
ret void
}
-define internal i32 @vfu2(%struct.MYstr* byval align 4 %u) nounwind readonly {
+define internal i32 @vfu2(%struct.MYstr* byval(%struct.MYstr) align 4 %u) nounwind readonly {
; IS__TUNIT_OPM: Function Attrs: nofree nosync nounwind readonly willreturn
; IS__TUNIT_OPM-LABEL: define {{[^@]+}}@vfu2
-; IS__TUNIT_OPM-SAME: (%struct.MYstr* noalias nocapture nofree noundef nonnull readonly byval align 8 dereferenceable(8) [[U:%.*]]) [[ATTR0:#.*]] {
+; IS__TUNIT_OPM-SAME: (%struct.MYstr* noalias nocapture nofree noundef nonnull readonly byval(%struct.MYstr) align 8 dereferenceable(8) [[U:%.*]]) [[ATTR0:#.*]] {
; IS__TUNIT_OPM-NEXT: entry:
; IS__TUNIT_OPM-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_MYSTR:%.*]], %struct.MYstr* @mystr, i32 0, i32 1
; IS__TUNIT_OPM-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
; IS__TUNIT_OPM-LABEL: define {{[^@]+}}@unions
; IS__TUNIT_OPM-SAME: () [[ATTR1:#.*]] {
; IS__TUNIT_OPM-NEXT: entry:
-; IS__TUNIT_OPM-NEXT: [[RESULT:%.*]] = call i32 @vfu2(%struct.MYstr* nocapture nofree noundef nonnull readonly byval align 8 dereferenceable(8) @mystr) [[ATTR0]]
+; IS__TUNIT_OPM-NEXT: [[RESULT:%.*]] = call i32 @vfu2(%struct.MYstr* nocapture nofree noundef nonnull readonly byval(%struct.MYstr) align 8 dereferenceable(8) @mystr) [[ATTR0]]
; IS__TUNIT_OPM-NEXT: ret i32 [[RESULT]]
;
; IS__TUNIT_NPM: Function Attrs: nofree nosync nounwind willreturn
; IS__CGSCC____-NEXT: ret i32 [[RESULT]]
;
entry:
- call void @vfu1(%struct.MYstr* byval align 4 @mystr) nounwind
- %result = call i32 @vfu2(%struct.MYstr* byval align 4 @mystr) nounwind
+ call void @vfu1(%struct.MYstr* byval(%struct.MYstr) align 4 @mystr) nounwind
+ %result = call i32 @vfu2(%struct.MYstr* byval(%struct.MYstr) align 4 @mystr) nounwind
ret i32 %result
}
-define internal i32 @vfu2_v2(%struct.MYstr* byval align 4 %u) nounwind readonly {
+define internal i32 @vfu2_v2(%struct.MYstr* byval(%struct.MYstr) align 4 %u) nounwind readonly {
; IS__TUNIT_OPM: Function Attrs: nofree nosync nounwind readnone willreturn
; IS__TUNIT_OPM-LABEL: define {{[^@]+}}@vfu2_v2
-; IS__TUNIT_OPM-SAME: (%struct.MYstr* noalias nocapture nofree noundef nonnull byval align 8 dereferenceable(8) [[U:%.*]]) [[ATTR2:#.*]] {
+; IS__TUNIT_OPM-SAME: (%struct.MYstr* noalias nocapture nofree noundef nonnull byval(%struct.MYstr) align 8 dereferenceable(8) [[U:%.*]]) [[ATTR2:#.*]] {
; IS__TUNIT_OPM-NEXT: entry:
; IS__TUNIT_OPM-NEXT: [[Z:%.*]] = getelementptr [[STRUCT_MYSTR:%.*]], %struct.MYstr* [[U]], i32 0, i32 1
; IS__TUNIT_OPM-NEXT: store i32 99, i32* [[Z]], align 4
;
; IS__CGSCC_OPM: Function Attrs: nofree norecurse nosync nounwind readnone willreturn
; IS__CGSCC_OPM-LABEL: define {{[^@]+}}@vfu2_v2
-; IS__CGSCC_OPM-SAME: (%struct.MYstr* noalias nocapture nofree noundef nonnull byval align 8 dereferenceable(8) [[U:%.*]]) [[ATTR0]] {
+; IS__CGSCC_OPM-SAME: (%struct.MYstr* noalias nocapture nofree noundef nonnull byval(%struct.MYstr) align 8 dereferenceable(8) [[U:%.*]]) [[ATTR0]] {
; IS__CGSCC_OPM-NEXT: entry:
; IS__CGSCC_OPM-NEXT: [[Z:%.*]] = getelementptr [[STRUCT_MYSTR:%.*]], %struct.MYstr* [[U]], i32 0, i32 1
; IS__CGSCC_OPM-NEXT: store i32 99, i32* [[Z]], align 4
; IS__TUNIT_OPM-LABEL: define {{[^@]+}}@unions_v2
; IS__TUNIT_OPM-SAME: () [[ATTR2]] {
; IS__TUNIT_OPM-NEXT: entry:
-; IS__TUNIT_OPM-NEXT: [[RESULT:%.*]] = call i32 @vfu2_v2(%struct.MYstr* nocapture nofree noundef nonnull readonly byval align 8 dereferenceable(8) @mystr) [[ATTR2]]
+; IS__TUNIT_OPM-NEXT: [[RESULT:%.*]] = call i32 @vfu2_v2(%struct.MYstr* nocapture nofree noundef nonnull readonly byval(%struct.MYstr) align 8 dereferenceable(8) @mystr) [[ATTR2]]
; IS__TUNIT_OPM-NEXT: ret i32 [[RESULT]]
;
; IS__TUNIT_NPM: Function Attrs: nofree nosync nounwind readnone willreturn
; IS__CGSCC_OPM-LABEL: define {{[^@]+}}@unions_v2
; IS__CGSCC_OPM-SAME: () [[ATTR0]] {
; IS__CGSCC_OPM-NEXT: entry:
-; IS__CGSCC_OPM-NEXT: [[RESULT:%.*]] = call i32 @vfu2_v2(%struct.MYstr* noalias nocapture nofree noundef nonnull readnone byval align 8 dereferenceable(8) @mystr) [[ATTR3:#.*]]
+; IS__CGSCC_OPM-NEXT: [[RESULT:%.*]] = call i32 @vfu2_v2(%struct.MYstr* noalias nocapture nofree noundef nonnull readnone byval(%struct.MYstr) align 8 dereferenceable(8) @mystr) [[ATTR3:#.*]]
; IS__CGSCC_OPM-NEXT: ret i32 [[RESULT]]
;
; IS__CGSCC_NPM: Function Attrs: nofree norecurse nosync nounwind readonly willreturn
; IS__CGSCC_NPM-NEXT: ret i32 [[RESULT]]
;
entry:
- call void @vfu1(%struct.MYstr* byval align 4 @mystr) nounwind
- %result = call i32 @vfu2_v2(%struct.MYstr* byval align 4 @mystr) nounwind
+ call void @vfu1(%struct.MYstr* byval(%struct.MYstr) align 4 @mystr) nounwind
+ %result = call i32 @vfu2_v2(%struct.MYstr* byval(%struct.MYstr) align 4 @mystr) nounwind
ret i32 %result
}
;{
declare void @escape_i8(i8* %ptr)
-define void @byval_not_readonly_1(i8* byval %written) readonly {
+define void @byval_not_readonly_1(i8* byval(i8) %written) readonly {
; CHECK: Function Attrs: readonly
; CHECK-LABEL: define {{[^@]+}}@byval_not_readonly_1
-; CHECK-SAME: (i8* noalias nonnull byval dereferenceable(1) [[WRITTEN:%.*]]) [[ATTR2]] {
+; CHECK-SAME: (i8* noalias nonnull byval(i8) dereferenceable(1) [[WRITTEN:%.*]]) [[ATTR2]] {
; CHECK-NEXT: call void @escape_i8(i8* nonnull dereferenceable(1) [[WRITTEN]])
; CHECK-NEXT: ret void
;
ret void
}
-define void @byval_not_readonly_2(i8* byval %written) readonly {
+define void @byval_not_readonly_2(i8* byval(i8) %written) readonly {
; IS__TUNIT____: Function Attrs: nofree nosync nounwind readnone willreturn
; IS__TUNIT____-LABEL: define {{[^@]+}}@byval_not_readonly_2
-; IS__TUNIT____-SAME: (i8* noalias nocapture nofree nonnull writeonly byval dereferenceable(1) [[WRITTEN:%.*]]) [[ATTR1]] {
+; IS__TUNIT____-SAME: (i8* noalias nocapture nofree nonnull writeonly byval(i8) dereferenceable(1) [[WRITTEN:%.*]]) [[ATTR1]] {
; IS__TUNIT____-NEXT: store i8 0, i8* [[WRITTEN]], align 1
; IS__TUNIT____-NEXT: ret void
;
; IS__CGSCC____: Function Attrs: nofree norecurse nosync nounwind readnone willreturn
; IS__CGSCC____-LABEL: define {{[^@]+}}@byval_not_readonly_2
-; IS__CGSCC____-SAME: (i8* noalias nocapture nofree nonnull writeonly byval dereferenceable(1) [[WRITTEN:%.*]]) [[ATTR1]] {
+; IS__CGSCC____-SAME: (i8* noalias nocapture nofree nonnull writeonly byval(i8) dereferenceable(1) [[WRITTEN:%.*]]) [[ATTR1]] {
; IS__CGSCC____-NEXT: store i8 0, i8* [[WRITTEN]], align 1
; IS__CGSCC____-NEXT: ret void
;
ret void
}
-define void @byval_not_readnone_1(i8* byval %written) readnone {
+define void @byval_not_readnone_1(i8* byval(i8) %written) readnone {
; CHECK: Function Attrs: readnone
; CHECK-LABEL: define {{[^@]+}}@byval_not_readnone_1
-; CHECK-SAME: (i8* noalias nonnull byval dereferenceable(1) [[WRITTEN:%.*]]) [[ATTR9:#.*]] {
+; CHECK-SAME: (i8* noalias nonnull byval(i8) dereferenceable(1) [[WRITTEN:%.*]]) [[ATTR9:#.*]] {
; CHECK-NEXT: call void @escape_i8(i8* nonnull dereferenceable(1) [[WRITTEN]])
; CHECK-NEXT: ret void
;
ret void
}
-define void @byval_not_readnone_2(i8* byval %written) readnone {
+define void @byval_not_readnone_2(i8* byval(i8) %written) readnone {
; IS__TUNIT____: Function Attrs: nofree nosync nounwind readnone willreturn
; IS__TUNIT____-LABEL: define {{[^@]+}}@byval_not_readnone_2
-; IS__TUNIT____-SAME: (i8* noalias nocapture nofree nonnull writeonly byval dereferenceable(1) [[WRITTEN:%.*]]) [[ATTR1]] {
+; IS__TUNIT____-SAME: (i8* noalias nocapture nofree nonnull writeonly byval(i8) dereferenceable(1) [[WRITTEN:%.*]]) [[ATTR1]] {
; IS__TUNIT____-NEXT: store i8 0, i8* [[WRITTEN]], align 1
; IS__TUNIT____-NEXT: ret void
;
; IS__CGSCC____: Function Attrs: nofree norecurse nosync nounwind readnone willreturn
; IS__CGSCC____-LABEL: define {{[^@]+}}@byval_not_readnone_2
-; IS__CGSCC____-SAME: (i8* noalias nocapture nofree nonnull writeonly byval dereferenceable(1) [[WRITTEN:%.*]]) [[ATTR1]] {
+; IS__CGSCC____-SAME: (i8* noalias nocapture nofree nonnull writeonly byval(i8) dereferenceable(1) [[WRITTEN:%.*]]) [[ATTR1]] {
; IS__CGSCC____-NEXT: store i8 0, i8* [[WRITTEN]], align 1
; IS__CGSCC____-NEXT: ret void
;
ret void
}
-define void @byval_no_fnarg(i8* byval %written) {
+define void @byval_no_fnarg(i8* byval(i8) %written) {
; IS__TUNIT____: Function Attrs: nofree nosync nounwind readnone willreturn
; IS__TUNIT____-LABEL: define {{[^@]+}}@byval_no_fnarg
-; IS__TUNIT____-SAME: (i8* noalias nocapture nofree nonnull writeonly byval dereferenceable(1) [[WRITTEN:%.*]]) [[ATTR1]] {
+; IS__TUNIT____-SAME: (i8* noalias nocapture nofree nonnull writeonly byval(i8) dereferenceable(1) [[WRITTEN:%.*]]) [[ATTR1]] {
; IS__TUNIT____-NEXT: store i8 0, i8* [[WRITTEN]], align 1
; IS__TUNIT____-NEXT: ret void
;
; IS__CGSCC____: Function Attrs: nofree norecurse nosync nounwind readnone willreturn
; IS__CGSCC____-LABEL: define {{[^@]+}}@byval_no_fnarg
-; IS__CGSCC____-SAME: (i8* noalias nocapture nofree nonnull writeonly byval dereferenceable(1) [[WRITTEN:%.*]]) [[ATTR1]] {
+; IS__CGSCC____-SAME: (i8* noalias nocapture nofree nonnull writeonly byval(i8) dereferenceable(1) [[WRITTEN:%.*]]) [[ATTR1]] {
; IS__CGSCC____-NEXT: store i8 0, i8* [[WRITTEN]], align 1
; IS__CGSCC____-NEXT: ret void
;
}
@S = external global %struct.X
-define internal void @test_byval(%struct.X* byval %a) {
+define internal void @test_byval(%struct.X* byval(%struct.X) %a) {
; IS__CGSCC_OPM: Function Attrs: nofree norecurse nosync nounwind readnone willreturn
; IS__CGSCC_OPM-LABEL: define {{[^@]+}}@test_byval
-; IS__CGSCC_OPM-SAME: (%struct.X* noalias nocapture nofree noundef nonnull writeonly byval align 8 dereferenceable(8) [[A:%.*]]) [[ATTR1]] {
+; IS__CGSCC_OPM-SAME: (%struct.X* noalias nocapture nofree noundef nonnull writeonly byval(%struct.X) align 8 dereferenceable(8) [[A:%.*]]) [[ATTR1]] {
; IS__CGSCC_OPM-NEXT: [[G0:%.*]] = getelementptr [[STRUCT_X:%.*]], %struct.X* [[A]], i32 0, i32 0
; IS__CGSCC_OPM-NEXT: store i8* null, i8** [[G0]], align 8
; IS__CGSCC_OPM-NEXT: ret void
ret void
}
-define internal i8*@test_byval2(%struct.X* byval %a) {
+define internal i8*@test_byval2(%struct.X* byval(%struct.X) %a) {
; IS__TUNIT____: Function Attrs: nofree nosync nounwind readonly willreturn
; IS__TUNIT____-LABEL: define {{[^@]+}}@test_byval2
; IS__TUNIT____-SAME: () [[ATTR3:#.*]] {
define i32 @bar() {
; CHECK: call void @foo(i8 signext 1) [[NUW]]
- %A = call zeroext i8(i8*, i8, ...) @foo(i8* inreg null, i8 signext 1, %struct* byval null ) nounwind
+ %A = call zeroext i8(i8*, i8, ...) @foo(i8* inreg null, i8 signext 1, %struct* byval(%struct) null ) nounwind
ret i32 0
}
ret i32 undef
}
-declare void @llvm.va_start(i8*) nounwind
+declare void @llvm.va_start(i8*) nounwind
-declare void @llvm.va_end(i8*) nounwind
+declare void @llvm.va_end(i8*) nounwind
define i32 @main() {
entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%tmp = getelementptr [4 x %struct.point], [4 x %struct.point]* @pts, i32 0, i32 0 ; <%struct.point*> [#uses=1]
- %tmp1 = call i32 (i32, ...) @va1( i32 1, %struct.point* byval %tmp ) nounwind ; <i32> [#uses=0]
- call void @exit( i32 0 ) noreturn nounwind
+ %tmp1 = call i32 (i32, ...) @va1(i32 1, %struct.point* byval(%struct.point) %tmp) nounwind ; <i32> [#uses=0]
+ call void @exit( i32 0 ) noreturn nounwind
unreachable
}
-declare void @exit(i32) noreturn nounwind
+declare void @exit(i32) noreturn nounwind
define i32 @call_va(i32 %in) {
%stacked = alloca i32
store i32 42, i32* %stacked
- %res = call i32(i32, i32, ...) @va_func(i32 %in, i32 %in, [6 x i32] undef, i32* byval %stacked)
+ %res = call i32(i32, i32, ...) @va_func(i32 %in, i32 %in, [6 x i32] undef, i32* byval(i32) %stacked)
ret i32 %res
-; CHECK: call i32 (i32, i32, ...) @va_func(i32 undef, i32 %in, [6 x i32] undef, i32* byval %stacked)
+; CHECK: call i32 (i32, i32, ...) @va_func(i32 undef, i32 %in, [6 x i32] undef, i32* byval(i32) %stacked)
}
define internal i32 @va_deadret_func(i32 %a, i32 %b, ...) {
define void @call_deadret(i32 %in) {
%stacked = alloca i32
store i32 42, i32* %stacked
- call i32 (i32, i32, ...) @va_deadret_func(i32 undef, i32 %in, [6 x i32] undef, i32* byval %stacked)
+ call i32 (i32, i32, ...) @va_deadret_func(i32 undef, i32 %in, [6 x i32] undef, i32* byval(i32) %stacked)
ret void
-; CHECK: call void (i32, i32, ...) @va_deadret_func(i32 undef, i32 undef, [6 x i32] undef, i32* byval %stacked)
+; CHECK: call void (i32, i32, ...) @va_deadret_func(i32 undef, i32 undef, [6 x i32] undef, i32* byval(i32) %stacked)
}
; Right now the DSE in presence of fence is only done in end blocks (with no successors),
; but the same logic applies to other basic blocks as well.
; The store to %addr.i can be removed since it is a byval attribute
-define void @test3(i32* byval %addr.i) {
+define void @test3(i32* byval(i32) %addr.i) {
; CHECK-LABEL: @test3
; CHECK-NOT: store
; CHECK: fence
; Test for byval handling.
%struct.x = type { i32, i32, i32, i32 }
-define void @test9(%struct.x* byval %a) nounwind {
+define void @test9(%struct.x* byval(%struct.x) %a) nounwind {
; CHECK-LABEL: @test9(
; CHECK-NEXT: ret void
;
}
; The store here is not dead because the byval call reads it.
-declare void @test19f({i32}* byval align 4 %P)
+declare void @test19f({i32}* byval({i32}) align 4 %P)
-define void @test19({i32} * nocapture byval align 4 %arg5) nounwind ssp {
+define void @test19({i32}* nocapture byval({i32}) align 4 %arg5) nounwind ssp {
; CHECK-LABEL: @test19(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds { i32 }, { i32 }* [[ARG5:%.*]], i32 0, i32 0
; CHECK-NEXT: store i32 912, i32* [[TMP7]], align 4
-; CHECK-NEXT: call void @test19f({ i32 }* byval align 4 [[ARG5]])
+; CHECK-NEXT: call void @test19f({ i32 }* byval({ i32 }) align 4 [[ARG5]])
; CHECK-NEXT: ret void
;
bb:
%tmp7 = getelementptr inbounds {i32}, {i32}* %arg5, i32 0, i32 0
store i32 912, i32* %tmp7
- call void @test19f({i32}* byval align 4 %arg5)
+ call void @test19f({i32}* byval({i32}) align 4 %arg5)
ret void
}
target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
target triple = "i386-unknown-linux-gnu"
-declare void @g(i32* byval %p)
+declare void @g(i32* byval(i32) %p)
-define void @f(i32* byval %x) {
+define void @f(i32* byval(i32) %x) {
entry:
%p = alloca i32
%v = load i32, i32* %x
store i32 %v, i32* %p
- tail call void @g(i32* byval %p)
+ tail call void @g(i32* byval(i32) %p)
ret void
}
-; CHECK-LABEL: define void @f(i32* byval %x)
+; CHECK-LABEL: define void @f(i32* byval(i32) %x)
; CHECK: store i32 %v, i32* %p
-; CHECK: tail call void @g(i32* byval %p)
+; CHECK: tail call void @g(i32* byval(i32) %p)
; Right now the DSE in presence of fence is only done in end blocks (with no successors),
; but the same logic applies to other basic blocks as well.
; The store to %addr.i can be removed since it is a byval attribute
-define void @test3(i32* byval %addr.i) {
+define void @test3(i32* byval(i32) %addr.i) {
; CHECK-LABEL: @test3
; CHECK-NOT: store
; CHECK: fence
; Test for byval handling.
%struct.x = type { i32, i32, i32, i32 }
-define void @test9(%struct.x* byval %a) nounwind {
+define void @test9(%struct.x* byval(%struct.x) %a) nounwind {
; CHECK-LABEL: @test9(
; CHECK-NEXT: ret void
;
; The store here is not dead because the byval call reads it.
-declare void @test19f({i32}* byval align 4 %P)
+declare void @test19f({i32}* byval({i32}) align 4 %P)
-define void @test19({i32} * nocapture byval align 4 %arg5) nounwind ssp {
+define void @test19({i32} * nocapture byval({i32}) align 4 %arg5) nounwind ssp {
; CHECK-LABEL: @test19(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds { i32 }, { i32 }* [[ARG5:%.*]], i32 0, i32 0
; CHECK-NEXT: store i32 912, i32* [[TMP7]], align 4
-; CHECK-NEXT: call void @test19f({ i32 }* byval align 4 [[ARG5]])
+; CHECK-NEXT: call void @test19f({ i32 }* byval({ i32 }) align 4 [[ARG5]])
; CHECK-NEXT: ret void
;
bb:
%tmp7 = getelementptr inbounds {i32}, {i32}* %arg5, i32 0, i32 0
store i32 912, i32* %tmp7
- call void @test19f({i32}* byval align 4 %arg5)
+ call void @test19f({i32}* byval({i32}) align 4 %arg5)
ret void
}
target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
target triple = "i386-unknown-linux-gnu"
-declare void @g(i32* byval %p)
+declare void @g(i32* byval(i32) %p)
-define void @f(i32* byval %x) {
+define void @f(i32* byval(i32) %x) {
entry:
%p = alloca i32
%v = load i32, i32* %x
store i32 %v, i32* %p
- tail call void @g(i32* byval %p)
+ tail call void @g(i32* byval(i32) %p)
ret void
}
-; CHECK-LABEL: define void @f(i32* byval %x)
+; CHECK-LABEL: define void @f(i32* byval(i32) %x)
; CHECK: store i32 %v, i32* %p
-; CHECK: tail call void @g(i32* byval %p)
+; CHECK: tail call void @g(i32* byval(i32) %p)
; RUN: opt < %s -basic-aa -gvn
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
%struct.S0 = type { [2 x i8], [2 x i8], [4 x i8], [2 x i8], i32, i32, i32, i32 }
-define void @fn1(%struct.S0* byval align 8 %p1) {
+define void @fn1(%struct.S0* byval(%struct.S0) align 8 %p1) {
br label %for.cond
for.cond: ; preds = %1, %0
br label %for.end
ret void
}
-declare fastcc void @cc70a02__complex_integers__complex.164(%struct.cc70a02__complex_integers__complex_type* noalias nocapture sret, i8 signext, i8 signext) nounwind
+declare fastcc void @cc70a02__complex_integers__complex.164(%struct.cc70a02__complex_integers__complex_type* noalias nocapture sret(%struct.cc70a02__complex_integers__complex_type), i8 signext, i8 signext) nounwind
-declare fastcc void @cc70a02__complex_integers__Osubtract.149(%struct.cc70a02__complex_integers__complex_type* noalias sret, %struct.cc70a02__complex_integers__complex_type* byval align 4)
+declare fastcc void @cc70a02__complex_integers__Osubtract.149(%struct.cc70a02__complex_integers__complex_type* noalias sret(%struct.cc70a02__complex_integers__complex_type), %struct.cc70a02__complex_integers__complex_type* byval(%struct.cc70a02__complex_integers__complex_type) align 4)
-declare fastcc void @cc70a02__complex_integers__Oadd.153(%struct.cc70a02__complex_integers__complex_type* noalias sret, %struct.cc70a02__complex_integers__complex_type* byval align 4, %struct.cc70a02__complex_integers__complex_type* byval align 4)
+declare fastcc void @cc70a02__complex_integers__Oadd.153(%struct.cc70a02__complex_integers__complex_type* noalias sret(%struct.cc70a02__complex_integers__complex_type), %struct.cc70a02__complex_integers__complex_type* byval(%struct.cc70a02__complex_integers__complex_type) align 4, %struct.cc70a02__complex_integers__complex_type* byval(%struct.cc70a02__complex_integers__complex_type) align 4)
-declare fastcc void @cc70a02__complex_multiplication.170(%struct.cc70a02__complex_integers__complex_type* noalias sret, %struct.cc70a02__complex_integers__complex_type* byval align 4)
+declare fastcc void @cc70a02__complex_multiplication.170(%struct.cc70a02__complex_integers__complex_type* noalias sret(%struct.cc70a02__complex_integers__complex_type), %struct.cc70a02__complex_integers__complex_type* byval(%struct.cc70a02__complex_integers__complex_type) align 4)
declare void @__gnat_rcheck_12(i8*, i32) noreturn
%struct.s = type { i32, i32 }
-define void @foo(%struct.s* byval nocapture readonly %a) {
+define void @foo(%struct.s* byval(%struct.s) nocapture readonly %a) {
entry:
%x = alloca [2 x i32], align 4
%a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
ret void
}
-define void @foo0(%struct.s* byval nocapture readonly %a) {
+define void @foo0(%struct.s* byval(%struct.s) nocapture readonly %a) {
entry:
%x = alloca [2 x i32]
%a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
ret void
}
-define void @foo1(%struct.s* byval nocapture readonly %a) {
+define void @foo1(%struct.s* byval(%struct.s) nocapture readonly %a) {
entry:
%x = alloca [2 x i32], align 1
%a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
declare void @bar(i32*) #1
-define void @goo(%struct.s* byval nocapture readonly %a) {
+define void @goo(%struct.s* byval(%struct.s) nocapture readonly %a) {
entry:
%x = alloca [2 x i32], align 32
%a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
store i64 0, i64* %a, align 8
%a1 = bitcast i64* %a to i32*
store i32 1, i32* %a1, align 8
- call void @foo(%struct.s* byval %tmpcast)
+ call void @foo(%struct.s* byval(%struct.s) %tmpcast)
store i32 2, i32* %a1, align 8
- call void @goo(%struct.s* byval %tmpcast)
+ call void @goo(%struct.s* byval(%struct.s) %tmpcast)
ret i32 0
}
store i64 0, i64* %a, align 8
%a1 = bitcast i64* %a to i32*
store i32 1, i32* %a1, align 8
- call void @foo0(%struct.s* byval %tmpcast)
+ call void @foo0(%struct.s* byval(%struct.s) %tmpcast)
store i32 2, i32* %a1, align 8
- call void @goo(%struct.s* byval %tmpcast)
+ call void @goo(%struct.s* byval(%struct.s) %tmpcast)
ret i32 0
}
declare void @ext(i32*)
-define void @bar(i32* byval %x) {
+define void @bar(i32* byval(i32) %x) {
call void @ext(i32* %x)
ret void
}
; CHECK-LABEL: define void @foo(
; CHECK: llvm.lifetime.start
; CHECK: store i32 %2, i32* %x
- call void @bar(i32* byval %x)
+ call void @bar(i32* byval(i32) %x)
ret void
}
-define internal void @qux(i32* byval %x) {
+define internal void @qux(i32* byval(i32) %x) {
call void @ext(i32* %x)
tail call void @ext(i32* null)
ret void
; CHECK: {{^ *}}call void @ext(i32* nonnull %[[POS]]
; CHECK: tail call void @ext(i32* null)
; CHECK: ret void
- tail call void @qux(i32* byval %x)
+ tail call void @qux(i32* byval(i32) %x)
ret void
}
; A byval parameter passed into a function which is passed out as byval does
; not block the call from being marked as tail.
-declare void @ext2(i32* byval)
+declare void @ext2(i32* byval(i32))
-define void @bar2(i32* byval %x) {
- call void @ext2(i32* byval %x)
+define void @bar2(i32* byval(i32) %x) {
+ call void @ext2(i32* byval(i32) %x)
ret void
}
; CHECK: %[[POS:.*]] = alloca i32
; CHECK: %[[VAL:.*]] = load i32, i32* %x
; CHECK: store i32 %[[VAL]], i32* %[[POS]]
-; CHECK: tail call void @ext2(i32* nonnull byval %[[POS]]
+; CHECK: tail call void @ext2(i32* nonnull byval(i32) %[[POS]]
; CHECK: ret void
- tail call void @bar2(i32* byval %x)
+ tail call void @bar2(i32* byval(i32) %x)
ret void
}
; CHECK: %[[POS:.*]] = alloca i32
; CHECK: %[[VAL:.*]] = load i32, i32* %x
; CHECK: store i32 %[[VAL]], i32* %[[POS]]
-; CHECK: tail call void @ext2(i32* nonnull byval %[[POS]]
+; CHECK: tail call void @ext2(i32* nonnull byval(i32) %[[POS]]
; CHECK: ret void
%x = alloca i32
- tail call void @bar2(i32* byval %x)
+ tail call void @bar2(i32* byval(i32) %x)
ret void
}
%struct.ss = type { i32, i64 }
@.str = internal constant [10 x i8] c"%d, %lld\0A\00" ; <[10 x i8]*> [#uses=1]
-define internal void @f(%struct.ss* byval %b) nounwind {
+define internal void @f(%struct.ss* byval(%struct.ss) %b) nounwind {
entry:
%tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0 ; <i32*> [#uses=2]
%tmp1 = load i32, i32* %tmp, align 4 ; <i32> [#uses=1]
ret void
}
-declare i32 @printf(i8*, ...) nounwind
+declare i32 @printf(i8*, ...) nounwind
define i32 @test1() nounwind {
entry:
store i32 1, i32* %tmp1, align 8
%tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1 ; <i64*> [#uses=1]
store i64 2, i64* %tmp4, align 4
- call void @f( %struct.ss* byval %S ) nounwind
+ call void @f(%struct.ss* byval(%struct.ss) %S) nounwind
ret i32 0
; CHECK: @test1()
; CHECK: %S1 = alloca %struct.ss
; CHECK: ret i32 0
}
-; Inlining a byval struct should NOT cause an explicit copy
+; Inlining a byval struct should NOT cause an explicit copy
; into an alloca if the function is readonly
-define internal i32 @f2(%struct.ss* byval %b) nounwind readonly {
+define internal i32 @f2(%struct.ss* byval(%struct.ss) %b) nounwind readonly {
entry:
%tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0 ; <i32*> [#uses=2]
%tmp1 = load i32, i32* %tmp, align 4 ; <i32> [#uses=1]
store i32 1, i32* %tmp1, align 8
%tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1 ; <i64*> [#uses=1]
store i64 2, i64* %tmp4, align 4
- %X = call i32 @f2( %struct.ss* byval %S ) nounwind
+ %X = call i32 @f2(%struct.ss* byval(%struct.ss) %S) nounwind
ret i32 %X
; CHECK: @test2()
; CHECK: %S = alloca %struct.ss
; PR8769
declare void @g3(%struct.ss* %p)
-define internal void @f3(%struct.ss* byval align 64 %b) nounwind {
+define internal void @f3(%struct.ss* byval(%struct.ss) align 64 %b) nounwind {
call void @g3(%struct.ss* %b) ;; Could make alignment assumptions!
ret void
}
define void @test3() nounwind {
entry:
%S = alloca %struct.ss, align 1 ;; May not be aligned.
- call void @f3( %struct.ss* byval align 64 %S) nounwind
+ call void @f3(%struct.ss* byval(%struct.ss) align 64 %S) nounwind
ret void
; CHECK: @test3()
; CHECK: %S1 = alloca %struct.ss, align 64
}
-; Inlining a byval struct should NOT cause an explicit copy
+; Inlining a byval struct should NOT cause an explicit copy
; into an alloca if the function is readonly, but should increase an alloca's
; alignment to satisfy an explicit alignment request.
-define internal i32 @f4(%struct.ss* byval align 64 %b) nounwind readonly {
+define internal i32 @f4(%struct.ss* byval(%struct.ss) align 64 %b) nounwind readonly {
call void @g3(%struct.ss* %b)
ret i32 4
}
define i32 @test4() nounwind {
entry:
%S = alloca %struct.ss, align 2 ; <%struct.ss*> [#uses=4]
- %X = call i32 @f4( %struct.ss* byval align 64 %S ) nounwind
+ %X = call i32 @f4(%struct.ss* byval(%struct.ss) align 64 %S) nounwind
ret i32 %X
; CHECK: @test4()
; CHECK: %S = alloca %struct.ss, align 64
@b = global %struct.S0 { i32 1 }, align 4
@a = common global i32 0, align 4
-define internal void @f5(%struct.S0* byval nocapture readonly align 4 %p) {
+define internal void @f5(%struct.S0* byval(%struct.S0) nocapture readonly align 4 %p) {
entry:
store i32 0, i32* getelementptr inbounds (%struct.S0, %struct.S0* @b, i64 0, i32 0), align 4
%f2 = getelementptr inbounds %struct.S0, %struct.S0* %p, i64 0, i32 0
define i32 @test5() {
entry:
- tail call void @f5(%struct.S0* byval align 4 @b)
+ tail call void @f5(%struct.S0* byval(%struct.S0) align 4 @b)
%0 = load i32, i32* @a, align 4
ret i32 %0
; CHECK: @test5()
@d = addrspace(1) global %struct.S1 { i32 1 }, align 4
@c = common addrspace(1) global i32 0, align 4
-define internal void @f5_as1(%struct.S1 addrspace(1)* byval nocapture readonly align 4 %p) {
+define internal void @f5_as1(%struct.S1 addrspace(1)* byval(%struct.S1) nocapture readonly align 4 %p) {
entry:
store i32 0, i32 addrspace(1)* getelementptr inbounds (%struct.S1, %struct.S1 addrspace(1)* @d, i64 0, i32 0), align 4
%f2 = getelementptr inbounds %struct.S1, %struct.S1 addrspace(1)* %p, i64 0, i32 0
define i32 @test5_as1() {
entry:
- tail call void @f5_as1(%struct.S1 addrspace(1)* byval align 4 @d)
+ tail call void @f5_as1(%struct.S1 addrspace(1)* byval(%struct.S1) align 4 @d)
%0 = load i32, i32 addrspace(1)* @c, align 4
ret i32 %0
; CHECK: @test5_as1()
@gFoo = global %struct.foo zeroinitializer, align 8
-define i32 @foo(%struct.foo* byval align 8 %f, i32 %a) {
+define i32 @foo(%struct.foo* byval(%struct.foo) align 8 %f, i32 %a) {
entry:
%a1 = getelementptr inbounds %struct.foo, %struct.foo* %f, i32 0, i32 1
%arrayidx = getelementptr inbounds [16 x i32], [16 x i32]* %a1, i32 0, i32 %a
; CHECK: llvm.lifetime.start
; CHECK: memcpy
entry:
- %call = call i32 @foo(%struct.foo* byval align 8 @gFoo, i32 %argc)
+ %call = call i32 @foo(%struct.foo* byval(%struct.foo) align 8 @gFoo, i32 %argc)
ret i32 %call
}
%shadow_ray = alloca %struct.ray, align 8
call void @fix(%struct.ray* %shadow_ray)
- %call = call i32 @ray_sphere(%struct.sphere* %i, %struct.ray* byval align 8 %shadow_ray, %struct.spoint* null)
+ %call = call i32 @ray_sphere(%struct.sphere* %i, %struct.ray* byval(%struct.ray) align 8 %shadow_ray, %struct.spoint* null)
ret i32 %call
; CHECK-LABEL: @caller(
declare void @fix(%struct.ray*)
-define i32 @ray_sphere(%struct.sphere* nocapture %sph, %struct.ray* nocapture byval align 8 %ray, %struct.spoint* %sp) nounwind uwtable ssp {
+define i32 @ray_sphere(%struct.sphere* nocapture %sph, %struct.ray* nocapture byval(%struct.ray) align 8 %ray, %struct.spoint* %sp) nounwind uwtable ssp {
%1 = getelementptr inbounds %struct.ray, %struct.ray* %ray, i64 0, i32 1, i32 0
%2 = load double, double* %1, align 8
%3 = fmul double %2, %2
; CHECK: musttail call void @test_byval_c(
; CHECK-NEXT: ret void
-declare void @test_byval_c(i32* byval %p)
-define internal void @test_byval_b(i32* byval %p) {
- musttail call void @test_byval_c(i32* byval %p)
+declare void @test_byval_c(i32* byval(i32) %p)
+define internal void @test_byval_b(i32* byval(i32) %p) {
+ musttail call void @test_byval_c(i32* byval(i32) %p)
ret void
}
-define void @test_byval_a(i32* byval %p) {
- musttail call void @test_byval_b(i32* byval %p)
+define void @test_byval_a(i32* byval(i32) %p) {
+ musttail call void @test_byval_b(i32* byval(i32) %p)
ret void
}
; CHECK-NEXT: ret void
declare void @escape(i8* %buf)
-declare void @test_dynalloca_c(i32* byval %p, i32 %n)
-define internal void @test_dynalloca_b(i32* byval %p, i32 %n) alwaysinline {
+declare void @test_dynalloca_c(i32* byval(i32) %p, i32 %n)
+define internal void @test_dynalloca_b(i32* byval(i32) %p, i32 %n) alwaysinline {
%buf = alloca i8, i32 %n ; dynamic alloca
call void @escape(i8* %buf) ; escape it
- musttail call void @test_dynalloca_c(i32* byval %p, i32 %n)
+ musttail call void @test_dynalloca_c(i32* byval(i32) %p, i32 %n)
ret void
}
-define void @test_dynalloca_a(i32* byval %p, i32 %n) {
- musttail call void @test_dynalloca_b(i32* byval %p, i32 %n)
+define void @test_dynalloca_a(i32* byval(i32) %p, i32 %n) {
+ musttail call void @test_dynalloca_b(i32* byval(i32) %p, i32 %n)
ret void
}
}
define void @test_caller_2(i8* %p, i8* %q, i16 %r) {
- call signext i16 (...) @test_callee_2(i8* %p, i8* byval %q, i16 signext %r)
+ call signext i16 (...) @test_callee_2(i8* %p, i8* byval(i8) %q, i16 signext %r)
ret void
}
; CHECK-LABEL: define void @test_caller_2
-; CHECK: call signext i16 (...) @vararg_fn(i8* %p, i8* byval %q, i16 signext %r) [[FN_ATTRS:#[0-9]+]]
+; CHECK: call signext i16 (...) @vararg_fn(i8* %p, i8* byval(i8) %q, i16 signext %r) [[FN_ATTRS:#[0-9]+]]
define void @test_callee_3(i8* %p, ...) {
call signext i16 (...) @vararg_fn()
define void @foo(i8* %context) nounwind {
entry:
%tmp1 = bitcast i8* %context to %struct.NSRect* ; <%struct.NSRect*> [#uses=1]
- call void (i32, ...) @bar( i32 3, %struct.NSRect* byval align 4 %tmp1 ) nounwind
+ call void (i32, ...) @bar( i32 3, %struct.NSRect* byval(%struct.NSRect) align 4 %tmp1 ) nounwind
ret void
}
; CHECK-NEXT: store i32 0, i32* [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = bitcast %struct.Key* [[IOSPEC]] to i64*
; CHECK-NEXT: store i64 [[KEY_TOKEN2:%.*]], i64* [[TMP2]], align 8
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 (...) @foo(%struct.Key* nonnull byval align 4 [[IOSPEC]], i32* nonnull [[RET]]) [[ATTR0:#.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 (...) @foo(%struct.Key* nonnull byval(%struct.Key) align 4 [[IOSPEC]], i32* nonnull [[RET]]) [[ATTR0:#.*]]
; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[RET]], align 4
; CHECK-NEXT: ret i32 [[TMP4]]
;
%3 = getelementptr %struct.Key, %struct.Key* %iospec, i32 0, i32 0 ; <{ i32, i32 }*> [#uses=1]
%4 = bitcast { i32, i32 }* %3 to i64* ; <i64*> [#uses=1]
store i64 %key_token2, i64* %4, align 4
- %5 = call i32 (...) @foo(%struct.Key* byval align 4 %iospec, i32* %ret) nounwind ; <i32> [#uses=0]
+ %5 = call i32 (...) @foo(%struct.Key* byval(%struct.Key) align 4 %iospec, i32* %ret) nounwind ; <i32> [#uses=0]
%6 = load i32, i32* %ret, align 4 ; <i32> [#uses=1]
ret i32 %6
}
ret i32 %call
}
-declare i1 @fn5({ i32, i32 }* byval align 4 %r)
+declare i1 @fn5({ i32, i32 }* byval({ i32, i32 }) align 4 %r)
define i1 @test5() {
; CHECK-LABEL: @test5
define void @test14() nounwind readnone {
entry:
%tmp = bitcast i32 (i8* (i8*)*)* @test14f to i32 (i32*)*
- %call10 = call i32 %tmp(i32* byval undef)
+ %call10 = call i32 %tmp(i32* byval(i32) undef)
ret void
}
define void @test4() {
; CHECK-LABEL: @test4(
-; CHECK-NEXT: call void @baz(i8* byval getelementptr inbounds (%T, %T* @G, i64 0, i32 0))
+; CHECK-NEXT: call void @baz(i8* byval(i8) getelementptr inbounds (%T, %T* @G, i64 0, i32 0))
; CHECK-NEXT: ret void
;
%A = alloca %T
%a = bitcast %T* %A to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %a, i8* align 4 bitcast (%T* @G to i8*), i64 124, i1 false)
- call void @baz(i8* byval %a)
+ call void @baz(i8* byval(i8) %a)
ret void
}
declare void @llvm.lifetime.start.p0i8(i64, i8*)
define void @test5() {
; CHECK-LABEL: @test5(
-; CHECK-NEXT: call void @baz(i8* byval getelementptr inbounds (%T, %T* @G, i64 0, i32 0))
+; CHECK-NEXT: call void @baz(i8* byval(i8) getelementptr inbounds (%T, %T* @G, i64 0, i32 0))
; CHECK-NEXT: ret void
;
%A = alloca %T
%a = bitcast %T* %A to i8*
call void @llvm.lifetime.start.p0i8(i64 -1, i8* %a)
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %a, i8* align 4 bitcast (%T* @G to i8*), i64 124, i1 false)
- call void @baz(i8* byval %a)
+ call void @baz(i8* byval(i8) %a)
ret void
}
-declare void @baz(i8* byval)
+declare void @baz(i8* byval(i8))
define void @test6() {
; PR8644
define void @test4(i8 *%P) {
; CHECK-LABEL: @test4(
-; CHECK-NEXT: call void @test4a(i8* byval align 1 [[P:%.*]])
+; CHECK-NEXT: call void @test4a(i8* byval(i8) align 1 [[P:%.*]])
; CHECK-NEXT: ret void
;
%A = alloca %1
%a = bitcast %1* %A to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %a, i8* align 4 %P, i64 8, i1 false)
- call void @test4a(i8* align 1 byval %a)
+ call void @test4a(i8* align 1 byval(i8) %a)
ret void
}
; CHECK-NEXT: [[A1:%.*]] = alloca [[TMP1:%.*]], align 8
; CHECK-NEXT: [[A2:%.*]] = bitcast %1* [[A1]] to i8*
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p1i8.i64(i8* align 4 [[A2]], i8 addrspace(1)* align 4 [[P:%.*]], i64 8, i1 false)
-; CHECK-NEXT: call void @test4a(i8* byval align 1 [[A2]])
+; CHECK-NEXT: call void @test4a(i8* byval(i8) align 1 [[A2]])
; CHECK-NEXT: ret void
;
%a1 = alloca %1
%a2 = bitcast %1* %a1 to i8*
call void @llvm.memcpy.p0i8.p1i8.i64(i8* align 4 %a2, i8 addrspace(1)* align 4 %P, i64 8, i1 false)
- call void @test4a(i8* align 1 byval %a2)
+ call void @test4a(i8* align 1 byval(i8) %a2)
ret void
}
; CHECK-NEXT: [[A2:%.*]] = bitcast %1* [[A1]] to i8*
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[A2]], i8* align 4 [[P:%.*]], i64 8, i1 false)
; CHECK-NEXT: store i8 0, i8* [[A2]], align 1
-; CHECK-NEXT: call void @test4a(i8* byval align 1 [[A2]])
+; CHECK-NEXT: call void @test4a(i8* byval(i8) align 1 [[A2]])
; CHECK-NEXT: ret void
;
%a1 = alloca %1
%a2 = bitcast %1* %a1 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %a2, i8* align 4 %P, i64 8, i1 false)
store i8 0, i8* %a2
- call void @test4a(i8* align 1 byval %a2)
+ call void @test4a(i8* align 1 byval(i8) %a2)
ret void
}
; CHECK-NEXT: [[A2:%.*]] = bitcast %1* [[A1]] to i8*
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[A2]], i8* align 4 [[P:%.*]], i64 8, i1 false)
; CHECK-NEXT: [[X:%.*]] = load i8, i8* [[A2]], align 1
-; CHECK-NEXT: call void @test4a(i8* byval align 1 [[A2]])
+; CHECK-NEXT: call void @test4a(i8* byval(i8) align 1 [[A2]])
; CHECK-NEXT: ret i8 [[X]]
;
%a1 = alloca %1
%a2 = bitcast %1* %a1 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %a2, i8* align 4 %P, i64 8, i1 false)
%x = load i8, i8* %a2
- call void @test4a(i8* align 1 byval %a2)
+ call void @test4a(i8* align 1 byval(i8) %a2)
ret i8 %x
}
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[A2]], i8* align 4 [[P:%.*]], i64 8, i1 false)
; CHECK-NEXT: br i1 [[C:%.*]], label [[CALL:%.*]], label [[EXIT:%.*]]
; CHECK: call:
-; CHECK-NEXT: call void @test4a(i8* byval align 1 [[A2]])
+; CHECK-NEXT: call void @test4a(i8* byval(i8) align 1 [[A2]])
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: ret void
br i1 %c, label %call, label %exit
call:
- call void @test4a(i8* align 1 byval %a2)
+ call void @test4a(i8* align 1 byval(i8) %a2)
br label %exit
exit:
ret void
}
-declare void @test4a(i8* align 1 byval)
+declare void @test4a(i8* align 1 byval(i8))
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
declare void @llvm.memcpy.p0i8.p1i8.i64(i8* nocapture, i8 addrspace(1)* nocapture, i64, i1) nounwind
declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture, i64, i1) nounwind
@sS = external global %struct.S, align 16
-declare void @test5a(%struct.S* align 16 byval) nounwind ssp
+declare void @test5a(%struct.S* align 16 byval(%struct.S)) nounwind ssp
; rdar://8713376 - This memcpy can't be eliminated.
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP]], i8* align 16 bitcast (%struct.S* @sS to i8*), i64 32, i1 false)
; CHECK-NEXT: [[A:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[Y]], i64 0, i32 1, i64 0
; CHECK-NEXT: store i8 4, i8* [[A]], align 1
-; CHECK-NEXT: call void @test5a(%struct.S* byval align 16 [[Y]])
+; CHECK-NEXT: call void @test5a(%struct.S* byval(%struct.S) align 16 [[Y]])
; CHECK-NEXT: ret i32 0
;
entry:
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %tmp, i8* align 16 bitcast (%struct.S* @sS to i8*), i64 32, i1 false)
%a = getelementptr %struct.S, %struct.S* %y, i64 0, i32 1, i64 0
store i8 4, i8* %a
- call void @test5a(%struct.S* align 16 byval %y)
+ call void @test5a(%struct.S* align 16 byval(%struct.S) %y)
ret i32 0
}
; isn't itself 8 byte aligned.
%struct.p = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
-define i32 @test7(%struct.p* nocapture align 8 byval %q) nounwind ssp {
+define i32 @test7(%struct.p* nocapture align 8 byval(%struct.p) %q) nounwind ssp {
; CHECK-LABEL: @test7(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = call i32 @g(%struct.p* byval align 8 [[Q:%.*]]) [[ATTR0]]
+; CHECK-NEXT: [[CALL:%.*]] = call i32 @g(%struct.p* byval(%struct.p) align 8 [[Q:%.*]]) [[ATTR0]]
; CHECK-NEXT: ret i32 [[CALL]]
;
entry:
%tmp = bitcast %struct.p* %agg.tmp to i8*
%tmp1 = bitcast %struct.p* %q to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %tmp, i8* align 4 %tmp1, i64 48, i1 false)
- %call = call i32 @g(%struct.p* align 8 byval %agg.tmp) nounwind
+ %call = call i32 @g(%struct.p* align 8 byval(%struct.p) %agg.tmp) nounwind
ret i32 %call
}
-declare i32 @g(%struct.p* align 8 byval)
+declare i32 @g(%struct.p* align 8 byval(%struct.p))
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
@.str = private constant [11 x i8] c"0123456789\00"
@cell = external global %struct.s
-declare void @check(%struct.s* byval %p) nounwind
+declare void @check(%struct.s* byval(%struct.s) %p) nounwind
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
define void @foo() nounwind {
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 getelementptr inbounds (%struct.s, %struct.s* @cell, i32 0, i32 0, i32 0), i8* align 1 getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0), i32 11, i1 false)
; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.s* [[AGG_TMP]], i32 0, i32 0, i32 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP]], i8* align 4 getelementptr inbounds (%struct.s, %struct.s* @cell, i32 0, i32 0, i32 0), i32 16, i1 false)
-; CHECK-NEXT: call void @check(%struct.s* byval [[AGG_TMP]])
+; CHECK-NEXT: call void @check(%struct.s* byval(%struct.s) [[AGG_TMP]])
; CHECK-NEXT: ret void
;
entry:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 getelementptr inbounds (%struct.s, %struct.s* @cell, i32 0, i32 0, i32 0), i8* align 1 getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0), i32 11, i1 false)
%tmp = getelementptr inbounds %struct.s, %struct.s* %agg.tmp, i32 0, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %tmp, i8* align 4 getelementptr inbounds (%struct.s, %struct.s* @cell, i32 0, i32 0, i32 0), i32 16, i1 false)
- call void @check(%struct.s* byval %agg.tmp)
+ call void @check(%struct.s* byval(%struct.s) %agg.tmp)
ret void
}
%0 = type { x86_fp80, x86_fp80 }
-define void @ccosl(%0* noalias sret %agg.result, %0* byval align 8 %z) nounwind {
+define void @ccosl(%0* noalias sret %agg.result, %0* byval(%0) align 8 %z) nounwind {
; CHECK-LABEL: @ccosl(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[IZ:%.*]] = alloca [[TMP0:%.*]], align 16
; CHECK-NEXT: [[TMP8:%.*]] = load x86_fp80, x86_fp80* [[TMP7]], align 16
; CHECK-NEXT: store x86_fp80 [[TMP3]], x86_fp80* [[REAL]], align 16
; CHECK-NEXT: store x86_fp80 [[TMP8]], x86_fp80* [[TMP4]], align 16
-; CHECK-NEXT: call void @ccoshl(%0* noalias sret [[AGG_RESULT:%.*]], %0* byval align 8 [[IZ]]) [[ATTR0:#.*]]
+; CHECK-NEXT: call void @ccoshl(%0* noalias sret [[AGG_RESULT:%.*]], %0* byval(%0) align 8 [[IZ]]) [[ATTR0:#.*]]
; CHECK-NEXT: [[MEMTMP14:%.*]] = bitcast %0* [[MEMTMP]] to i8*
; CHECK-NEXT: [[AGG_RESULT15:%.*]] = bitcast %0* [[AGG_RESULT]] to i8*
; CHECK-NEXT: ret void
%tmp8 = load x86_fp80, x86_fp80* %tmp7, align 16
store x86_fp80 %tmp3, x86_fp80* %real, align 16
store x86_fp80 %tmp8, x86_fp80* %tmp4, align 16
- call void @ccoshl(%0* noalias sret %memtmp, %0* byval align 8 %iz) nounwind
+ call void @ccoshl(%0* noalias sret %memtmp, %0* byval(%0) align 8 %iz) nounwind
%memtmp14 = bitcast %0* %memtmp to i8*
%agg.result15 = bitcast %0* %agg.result to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %agg.result15, i8* align 16 %memtmp14, i32 32, i1 false)
ret void
}
-declare void @ccoshl(%0* noalias nocapture sret, %0* byval) nounwind
+declare void @ccoshl(%0* noalias nocapture sret, %0* byval(%0)) nounwind
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
; CHECK-LABEL: define void @foo
; CHECK: call void %bc
-define void @foo(i8* byval %a0, i8* swiftself %a4) {
+define void @foo(i8* byval(i8) %a0, i8* swiftself %a4) {
entry:
%bc = bitcast i8* %a0 to void (i8*, i8*)*
- call void %bc(i8* byval %a0, i8* swiftself %a4)
+ call void %bc(i8* byval(i8) %a0, i8* swiftself %a4)
ret void
}
%class.a = type { i32, i32, i32, i32, i32 }
; Function Attrs: nounwind optsize
-define dso_local zeroext i1 @pr41917(%class.a* byval nocapture readonly align 4 %g, %class.a* byval nocapture readonly align 4 %p2) local_unnamed_addr #0 {
+define dso_local zeroext i1 @pr41917(%class.a* byval(%class.a) nocapture readonly align 4 %g, %class.a* byval(%class.a) nocapture readonly align 4 %p2) local_unnamed_addr #0 {
; CHECK-LABEL: @pr41917(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = tail call zeroext i1 @f2() #3
; RUN: opt < %s -basic-aa -newgvn
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
%struct.S0 = type { [2 x i8], [2 x i8], [4 x i8], [2 x i8], i32, i32, i32, i32 }
-define void @fn1(%struct.S0* byval align 8 %p1) {
+define void @fn1(%struct.S0* byval(%struct.S0) align 8 %p1) {
br label %for.cond
for.cond: ; preds = %1, %0
br label %for.end
ret i32 %v
}
-define i32 @func5(%struct.Foo.1* byval %p) {
+define i32 @func5(%struct.Foo.1* byval(%struct.Foo.1) %p) {
entry:
%gep = getelementptr inbounds %struct.Foo.1, %struct.Foo.1* %p, i32 0, i32 0
%v = load i32, i32* %gep
; PR5038
%struct.MYstr = type { i8, i32 }
@mystr = internal global %struct.MYstr zeroinitializer ; <%struct.MYstr*> [#uses=3]
-define internal void @vfu1(%struct.MYstr* byval align 4 %u) nounwind {
+define internal void @vfu1(%struct.MYstr* byval(%struct.MYstr) align 4 %u) nounwind {
entry:
%0 = getelementptr %struct.MYstr, %struct.MYstr* %u, i32 0, i32 1 ; <i32*> [#uses=1]
store i32 99, i32* %0, align 4
ret void
}
-define internal i32 @vfu2(%struct.MYstr* byval align 4 %u) nounwind readonly {
+define internal i32 @vfu2(%struct.MYstr* byval(%struct.MYstr) align 4 %u) nounwind readonly {
entry:
%0 = getelementptr %struct.MYstr, %struct.MYstr* %u, i32 0, i32 1 ; <i32*> [#uses=1]
%1 = load i32, i32* %0
define i32 @unions() nounwind {
entry:
- call void @vfu1(%struct.MYstr* byval align 4 @mystr) nounwind
- %result = call i32 @vfu2(%struct.MYstr* byval align 4 @mystr) nounwind
+ call void @vfu1(%struct.MYstr* byval(%struct.MYstr) align 4 @mystr) nounwind
+ %result = call i32 @vfu2(%struct.MYstr* byval(%struct.MYstr) align 4 @mystr) nounwind
; CHECK: ret i32 %result
ret i32 %result
}
ret float %add.3
}
-define double @dotd(<4 x double>* byval nocapture readonly align 32, <4 x double>* byval nocapture readonly align 32) {
+define double @dotd(<4 x double>* byval(<4 x double>) nocapture readonly align 32, <4 x double>* byval(<4 x double>) nocapture readonly align 32) {
; CHECK-LABEL: @dotd(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[X:%.*]] = load <4 x double>, <4 x double>* [[TMP0:%.*]], align 32
%struct.S = type { [100 x i32] }
; Safe access to a byval argument.
-define i32 @ByValSafe(%struct.S* byval nocapture readonly align 8 %zzz) norecurse nounwind readonly safestack uwtable {
+define i32 @ByValSafe(%struct.S* byval(%struct.S) nocapture readonly align 8 %zzz) norecurse nounwind readonly safestack uwtable {
entry:
; CHECK-LABEL: @ByValSafe
; CHECK-NOT: __safestack_unsafe_stack_ptr
; Unsafe access to a byval argument.
; Argument is copied to the unsafe stack.
-define i32 @ByValUnsafe(%struct.S* byval nocapture readonly align 8 %zzz, i64 %idx) norecurse nounwind readonly safestack uwtable {
+define i32 @ByValUnsafe(%struct.S* byval(%struct.S) nocapture readonly align 8 %zzz, i64 %idx) norecurse nounwind readonly safestack uwtable {
entry:
; CHECK-LABEL: @ByValUnsafe
; CHECK: %[[A:.*]] = load {{.*}} @__safestack_unsafe_stack_ptr
; Unsafe access to a byval argument.
; Argument is copied to the unsafe stack.
; Check that dest align of memcpy is set according to datalayout prefered alignment
-define i32 @ByValUnsafe2(%struct.S* byval nocapture readonly %zzz, i64 %idx) norecurse nounwind readonly safestack uwtable {
+define i32 @ByValUnsafe2(%struct.S* byval(%struct.S) nocapture readonly %zzz, i64 %idx) norecurse nounwind readonly safestack uwtable {
entry:
; CHECK-LABEL: @ByValUnsafe
; CHECK: %[[A:.*]] = load {{.*}} @__safestack_unsafe_stack_ptr
}
; Highly aligned byval argument.
-define i32 @ByValUnsafeAligned(%struct.S* byval nocapture readonly align 64 %zzz, i64 %idx) norecurse nounwind readonly safestack uwtable {
+define i32 @ByValUnsafeAligned(%struct.S* byval(%struct.S) nocapture readonly align 64 %zzz, i64 %idx) norecurse nounwind readonly safestack uwtable {
entry:
; CHECK-LABEL: @ByValUnsafeAligned
; CHECK: %[[A:.*]] = load {{.*}} @__safestack_unsafe_stack_ptr
%struct.S = type { [100 x i8] }
; Function Attrs: safestack uwtable
-define void @f(%struct.S* byval align 8 %zzz) #0 !dbg !12 {
+define void @f(%struct.S* byval(%struct.S) align 8 %zzz) #0 !dbg !12 {
; CHECK: define void @f
entry:
}
; Don't tail call if a byval arg is captured.
-define void @test9(i32* byval %a) {
+define void @test9(i32* byval(i32) %a) {
; CHECK-LABEL: define void @test9(
; CHECK: {{^ *}}call void @use(
call void @use(i32* %a)
; point, and both calls below can be marked tail.
define void @test13() {
; CHECK-LABEL: @test13
-; CHECK: tail call void @bar(%struct.foo* byval %f)
+; CHECK: tail call void @bar(%struct.foo* byval(%struct.foo) %f)
; CHECK: tail call void @bar(%struct.foo* null)
entry:
%f = alloca %struct.foo
- call void @bar(%struct.foo* byval %f)
+ call void @bar(%struct.foo* byval(%struct.foo) %f)
call void @bar(%struct.foo* null)
ret void
}
; A call which passes a byval parameter using byval can be marked tail.
-define void @test14(%struct.foo* byval %f) {
+define void @test14(%struct.foo* byval(%struct.foo) %f) {
; CHECK-LABEL: @test14
; CHECK: tail call void @bar
entry:
- call void @bar(%struct.foo* byval %f)
+ call void @bar(%struct.foo* byval(%struct.foo) %f)
ret void
}
; If a byval parameter is copied into an alloca and passed byval the call can
; be marked tail.
-define void @test15(%struct.foo* byval %f) {
+define void @test15(%struct.foo* byval(%struct.foo) %f) {
; CHECK-LABEL: @test15
; CHECK: tail call void @bar
entry:
%0 = bitcast %struct.foo* %agg.tmp to i8*
%1 = bitcast %struct.foo* %f to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 40, i1 false)
- call void @bar(%struct.foo* byval %agg.tmp)
+ call void @bar(%struct.foo* byval(%struct.foo) %agg.tmp)
ret void
}
-declare void @bar(%struct.foo* byval)
+declare void @bar(%struct.foo* byval(%struct.foo))
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1)
; CHECK: Calling convention disallows byval
; CHECK-NEXT: void (i32 addrspace(5)*)* @byval_cc_amdgpu_kernel
-define amdgpu_kernel void @byval_cc_amdgpu_kernel(i32 addrspace(5)* byval %ptr) {
+define amdgpu_kernel void @byval_cc_amdgpu_kernel(i32 addrspace(5)* byval(i32) %ptr) {
ret void
}
; CHECK: Calling convention disallows byval
; CHECK-NEXT: void (i32 addrspace(1)*)* @byval_as1_cc_amdgpu_kernel
-define amdgpu_kernel void @byval_as1_cc_amdgpu_kernel(i32 addrspace(1)* byval %ptr) {
+define amdgpu_kernel void @byval_as1_cc_amdgpu_kernel(i32 addrspace(1)* byval(i32) %ptr) {
ret void
}
; CHECK: Calling convention disallows byval
; CHECK-NEXT: void (i32*)* @byval_as0_cc_amdgpu_kernel
-define amdgpu_kernel void @byval_as0_cc_amdgpu_kernel(i32* byval %ptr) {
+define amdgpu_kernel void @byval_as0_cc_amdgpu_kernel(i32* byval(i32) %ptr) {
ret void
}
; CHECK: Calling convention disallows byval
; CHECK-NEXT: void (i32 addrspace(5)*)* @byval_cc_amdgpu_vs
-define amdgpu_vs void @byval_cc_amdgpu_vs(i32 addrspace(5)* byval %ptr) {
+define amdgpu_vs void @byval_cc_amdgpu_vs(i32 addrspace(5)* byval(i32) %ptr) {
ret void
}
; CHECK: Calling convention disallows byval
; CHECK-NEXT: void (i32 addrspace(5)*)* @byval_cc_amdgpu_hs
-define amdgpu_hs void @byval_cc_amdgpu_hs(i32 addrspace(5)* byval %ptr) {
+define amdgpu_hs void @byval_cc_amdgpu_hs(i32 addrspace(5)* byval(i32) %ptr) {
ret void
}
; CHECK: Calling convention disallows byval
; CHECK-NEXT: void (i32 addrspace(5)*)* @byval_cc_amdgpu_gs
-define amdgpu_gs void @byval_cc_amdgpu_gs(i32 addrspace(5)* byval %ptr) {
+define amdgpu_gs void @byval_cc_amdgpu_gs(i32 addrspace(5)* byval(i32) %ptr) {
ret void
}
; CHECK: Calling convention disallows byval
; CHECK-NEXT: void (i32 addrspace(5)*)* @byval_cc_amdgpu_ps
-define amdgpu_ps void @byval_cc_amdgpu_ps(i32 addrspace(5)* byval %ptr) {
+define amdgpu_ps void @byval_cc_amdgpu_ps(i32 addrspace(5)* byval(i32) %ptr) {
ret void
}
; CHECK: Calling convention disallows byval
; CHECK-NEXT: void (i32 addrspace(5)*)* @byval_cc_amdgpu_cs
-define amdgpu_cs void @byval_cc_amdgpu_cs(i32 addrspace(5)* byval %ptr) {
+define amdgpu_cs void @byval_cc_amdgpu_cs(i32 addrspace(5)* byval(i32) %ptr) {
ret void
}
; RUN: llvm-as %s -o /dev/null
%struct.foo = type { i64 }
-declare void @h(%struct.foo* byval %num)
+declare void @h(%struct.foo* byval(%struct.foo) %num)
; RUN: not llvm-as %s -o /dev/null 2>&1 | FileCheck %s
-declare void @a(i64* byval inalloca %p)
+declare void @a(i64* byval(i64) inalloca %p)
; CHECK: Attributes {{.*}} are incompatible
declare void @b(i64* inreg inalloca %p)
}
declare void @mismatched_byval_callee({ i32 }*)
-define void @mismatched_byval({ i32 }* byval %a) {
+define void @mismatched_byval({ i32 }* byval({ i32 }) %a) {
; CHECK: mismatched ABI impacting function attributes
musttail call void @mismatched_byval_callee({ i32 }* %a)
ret void
ret void
}
-declare void @mismatched_alignment_callee(i32* byval align 8)
-define void @mismatched_alignment(i32* byval align 4 %a) {
+declare void @mismatched_alignment_callee(i32* byval(i32) align 8)
+define void @mismatched_alignment(i32* byval(i32) align 4 %a) {
; CHECK: mismatched ABI impacting function attributes
- musttail call void @mismatched_alignment_callee(i32* byval align 8 %a)
+ musttail call void @mismatched_alignment_callee(i32* byval(i32) align 8 %a)
ret void
}