From: LemonBoy
Date: Sun, 9 May 2021 16:51:05 +0000 (+0200)
Subject: [SelectionDAG] Regenerate test checks (NFC)
X-Git-Tag: llvmorg-14-init~7226
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=ad5f3f525828e1e499e94a5065bada5b5df936cb;p=platform%2Fupstream%2Fllvm.git

[SelectionDAG] Regenerate test checks (NFC)
---

diff --git a/llvm/test/CodeGen/X86/arg-copy-elide.ll b/llvm/test/CodeGen/X86/arg-copy-elide.ll
index 36510e0..1821578 100644
--- a/llvm/test/CodeGen/X86/arg-copy-elide.ll
+++ b/llvm/test/CodeGen/X86/arg-copy-elide.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=i686-windows < %s | FileCheck %s
 
 declare void @addrof_i1(i1*)
@@ -7,6 +8,13 @@ declare void @addrof_i128(i128*)
 declare void @addrof_i32_x3(i32*, i32*, i32*)
 
 define void @simple(i32 %x) {
+; CHECK-LABEL: simple:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    pushl %eax
+; CHECK-NEXT:    calll _addrof_i32
+; CHECK-NEXT:    addl $4, %esp
+; CHECK-NEXT:    retl
 entry:
   %x.addr = alloca i32
   store i32 %x, i32* %x.addr
@@ -14,17 +22,21 @@ entry:
   ret void
 }
 
-; CHECK-LABEL: _simple:
-; CHECK: leal 4(%esp), %[[reg:[^ ]*]]
-; CHECK: pushl %[[reg]]
-; CHECK: calll _addrof_i32
-; CHECK: retl
-
-
 ; We need to load %x before calling addrof_i32 now because it could mutate %x in
 ; place.
 
 define i32 @use_arg(i32 %x) {
+; CHECK-LABEL: use_arg:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushl %esi
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; CHECK-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    pushl %eax
+; CHECK-NEXT:    calll _addrof_i32
+; CHECK-NEXT:    addl $4, %esp
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    popl %esi
+; CHECK-NEXT:    retl
 entry:
   %x.addr = alloca i32
   store i32 %x, i32* %x.addr
@@ -32,19 +44,24 @@ entry:
   ret i32 %x
 }
 
-; CHECK-LABEL: _use_arg:
-; CHECK: pushl %[[csr:[^ ]*]]
-; CHECK-DAG: movl 8(%esp), %[[csr]]
-; CHECK-DAG: leal 8(%esp), %[[reg:[^ ]*]]
-; CHECK: pushl %[[reg]]
-; CHECK: calll _addrof_i32
-; CHECK: movl %[[csr]], %eax
-; CHECK: popl %[[csr]]
-; CHECK: retl
-
 ; We won't copy elide for types needing legalization such as i64 or i1.
 
 define i64 @split_i64(i64 %x) {
+; CHECK-LABEL: split_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushl %edi
+; CHECK-NEXT:    pushl %esi
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; CHECK-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    pushl %eax
+; CHECK-NEXT:    calll _addrof_i64
+; CHECK-NEXT:    addl $4, %esp
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    movl %edi, %edx
+; CHECK-NEXT:    popl %esi
+; CHECK-NEXT:    popl %edi
+; CHECK-NEXT:    retl
 entry:
   %x.addr = alloca i64, align 4
   store i64 %x, i64* %x.addr, align 4
@@ -52,43 +69,47 @@ entry:
   ret i64 %x
 }
 
-; CHECK-LABEL: _split_i64:
-; CHECK: pushl %[[csr2:[^ ]*]]
-; CHECK: pushl %[[csr1:[^ ]*]]
-; CHECK-DAG: movl 12(%esp), %[[csr1]]
-; CHECK-DAG: movl 16(%esp), %[[csr2]]
-; CHECK-DAG: leal 12(%esp), %[[reg:[^ ]*]]
-; CHECK: pushl %[[reg]]
-; CHECK: calll _addrof_i64
-; CHECK: addl $4, %esp
-; CHECK-DAG: movl %[[csr1]], %eax
-; CHECK-DAG: movl %[[csr2]], %edx
-; CHECK: popl %[[csr1]]
-; CHECK: popl %[[csr2]]
-; CHECK: retl
-
 define i1 @i1_arg(i1 %x) {
+; CHECK-LABEL: i1_arg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushl %ebx
+; CHECK-NEXT:    movb {{[0-9]+}}(%esp), %bl
+; CHECK-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    pushl %eax
+; CHECK-NEXT:    calll _addrof_i1
+; CHECK-NEXT:    addl $4, %esp
+; CHECK-NEXT:    movl %ebx, %eax
+; CHECK-NEXT:    popl %ebx
+; CHECK-NEXT:    retl
   %x.addr = alloca i1
   store i1 %x, i1* %x.addr
   call void @addrof_i1(i1* %x.addr)
   ret i1 %x
 }
 
-; CHECK-LABEL: _i1_arg:
-; CHECK: pushl %ebx
-; CHECK: movb 8(%esp), %bl
-; CHECK: leal 8(%esp), %eax
-; CHECK: pushl %eax
-; CHECK: calll _addrof_i1
-; CHECK: addl $4, %esp
-; CHECK: movl %ebx, %eax
-; CHECK: popl %ebx
-; CHECK: retl
-
 ; We can't copy elide when an i64 is split between registers and memory in a
 ; fastcc function.
 define fastcc i64 @fastcc_split_i64(i64* %p, i64 %x) {
+; CHECK-LABEL: fastcc_split_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushl %edi
+; CHECK-NEXT:    pushl %esi
+; CHECK-NEXT:    subl $8, %esp
+; CHECK-NEXT:    movl %edx, %esi
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; CHECK-NEXT:    movl %edi, {{[0-9]+}}(%esp)
+; CHECK-NEXT:    movl %edx, (%esp)
+; CHECK-NEXT:    movl %esp, %eax
+; CHECK-NEXT:    pushl %eax
+; CHECK-NEXT:    calll _addrof_i64
+; CHECK-NEXT:    addl $4, %esp
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    movl %edi, %edx
+; CHECK-NEXT:    addl $8, %esp
+; CHECK-NEXT:    popl %esi
+; CHECK-NEXT:    popl %edi
+; CHECK-NEXT:    retl
 entry:
   %x.addr = alloca i64, align 4
   store i64 %x, i64* %x.addr, align 4
@@ -96,20 +117,24 @@ entry:
   ret i64 %x
 }
 
-; CHECK-LABEL: _fastcc_split_i64:
-; CHECK-DAG: movl %edx, %[[r1:[^ ]*]]
-; CHECK-DAG: movl 20(%esp), %[[r2:[^ ]*]]
-; CHECK-DAG: movl %[[r2]], 4(%esp)
-; CHECK-DAG: movl %edx, (%esp)
-; CHECK: movl %esp, %[[reg:[^ ]*]]
-; CHECK: pushl %[[reg]]
-; CHECK: calll _addrof_i64
-; CHECK: retl
-
-
 ; We can't copy elide when it would reduce the user requested alignment.
 
 define void @high_alignment(i32 %x) {
+; CHECK-LABEL: high_alignment:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushl %ebp
+; CHECK-NEXT:    movl %esp, %ebp
+; CHECK-NEXT:    andl $-128, %esp
+; CHECK-NEXT:    subl $128, %esp
+; CHECK-NEXT:    movl 8(%ebp), %eax
+; CHECK-NEXT:    movl %eax, (%esp)
+; CHECK-NEXT:    movl %esp, %eax
+; CHECK-NEXT:    pushl %eax
+; CHECK-NEXT:    calll _addrof_i32
+; CHECK-NEXT:    addl $4, %esp
+; CHECK-NEXT:    movl %ebp, %esp
+; CHECK-NEXT:    popl %ebp
+; CHECK-NEXT:    retl
 entry:
   %x.p = alloca i32, align 128
   store i32 %x, i32* %x.p
@@ -117,21 +142,28 @@ entry:
   ret void
 }
 
-; CHECK-LABEL: _high_alignment:
-; CHECK: andl $-128, %esp
-; CHECK: movl 8(%ebp), %[[reg:[^ ]*]]
-; CHECK: movl %[[reg]], (%esp)
-; CHECK: movl %esp, %[[reg:[^ ]*]]
-; CHECK: pushl %[[reg]]
-; CHECK: calll _addrof_i32
-; CHECK: retl
-
-
 ; We can't copy elide when it would reduce the ABI required alignment.
 ; FIXME: We should lower the ABI alignment of i64 on Windows, since MSVC
 ; doesn't guarantee it.
 
 define void @abi_alignment(i64 %x) {
+; CHECK-LABEL: abi_alignment:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushl %ebp
+; CHECK-NEXT:    movl %esp, %ebp
+; CHECK-NEXT:    andl $-8, %esp
+; CHECK-NEXT:    subl $8, %esp
+; CHECK-NEXT:    movl 8(%ebp), %eax
+; CHECK-NEXT:    movl 12(%ebp), %ecx
+; CHECK-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
+; CHECK-NEXT:    movl %eax, (%esp)
+; CHECK-NEXT:    movl %esp, %eax
+; CHECK-NEXT:    pushl %eax
+; CHECK-NEXT:    calll _addrof_i64
+; CHECK-NEXT:    addl $4, %esp
+; CHECK-NEXT:    movl %ebp, %esp
+; CHECK-NEXT:    popl %ebp
+; CHECK-NEXT:    retl
 entry:
   %x.p = alloca i64
   store i64 %x, i64* %x.p
@@ -139,19 +171,43 @@ entry:
   ret void
 }
 
-; CHECK-LABEL: _abi_alignment:
-; CHECK: andl $-8, %esp
-; CHECK: movl 8(%ebp), %[[reg:[^ ]*]]
-; CHECK: movl %[[reg]], (%esp)
-; CHECK: movl %esp, %[[reg:[^ ]*]]
-; CHECK: pushl %[[reg]]
-; CHECK: calll _addrof_i64
-; CHECK: retl
-
-
 ; The code we generate for this is unimportant. This is mostly a crash test.
 
 define void @split_i128(i128* %sret, i128 %x) {
+; CHECK-LABEL: split_i128:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushl %ebp
+; CHECK-NEXT:    movl %esp, %ebp
+; CHECK-NEXT:    pushl %ebx
+; CHECK-NEXT:    pushl %edi
+; CHECK-NEXT:    pushl %esi
+; CHECK-NEXT:    andl $-8, %esp
+; CHECK-NEXT:    subl $32, %esp
+; CHECK-NEXT:    movl 12(%ebp), %eax
+; CHECK-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; CHECK-NEXT:    movl 16(%ebp), %ebx
+; CHECK-NEXT:    movl 20(%ebp), %esi
+; CHECK-NEXT:    movl 24(%ebp), %edi
+; CHECK-NEXT:    movl %edi, {{[0-9]+}}(%esp)
+; CHECK-NEXT:    movl %esi, {{[0-9]+}}(%esp)
+; CHECK-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
+; CHECK-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; CHECK-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    pushl %eax
+; CHECK-NEXT:    calll _addrof_i128
+; CHECK-NEXT:    addl $4, %esp
+; CHECK-NEXT:    movl 8(%ebp), %eax
+; CHECK-NEXT:    movl %edi, 12(%eax)
+; CHECK-NEXT:    movl %esi, 8(%eax)
+; CHECK-NEXT:    movl %ebx, 4(%eax)
+; CHECK-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; CHECK-NEXT:    movl %ecx, (%eax)
+; CHECK-NEXT:    leal -12(%ebp), %esp
+; CHECK-NEXT:    popl %esi
+; CHECK-NEXT:    popl %edi
+; CHECK-NEXT:    popl %ebx
+; CHECK-NEXT:    popl %ebp
+; CHECK-NEXT:    retl
 entry:
   %x.addr = alloca i128
   store i128 %x, i128* %x.addr
@@ -160,15 +216,26 @@ entry:
   ret void
 }
 
-; CHECK-LABEL: _split_i128:
-; CHECK: pushl %ebp
-; CHECK: calll _addrof_i128
-; CHECK: retl
-
-
 ; Check that we load all of x, y, and z before the call.
 
 define i32 @three_args(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: three_args:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushl %esi
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; CHECK-NEXT:    addl {{[0-9]+}}(%esp), %esi
+; CHECK-NEXT:    addl {{[0-9]+}}(%esp), %esi
+; CHECK-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    leal {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT:    leal {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT:    pushl %eax
+; CHECK-NEXT:    pushl %ecx
+; CHECK-NEXT:    pushl %edx
+; CHECK-NEXT:    calll _addrof_i32_x3
+; CHECK-NEXT:    addl $12, %esp
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    popl %esi
+; CHECK-NEXT:    retl
 entry:
   %z.addr = alloca i32, align 4
   %y.addr = alloca i32, align 4
@@ -182,24 +249,16 @@ entry:
   ret i32 %sum
 }
 
-; CHECK-LABEL: _three_args:
-; CHECK: pushl %[[csr:[^ ]*]]
-; CHECK-DAG: movl {{[0-9]+}}(%esp), %[[csr]]
-; CHECK-DAG: addl {{[0-9]+}}(%esp), %[[csr]]
-; CHECK-DAG: addl {{[0-9]+}}(%esp), %[[csr]]
-; CHECK-DAG: leal 8(%esp), %[[x:[^ ]*]]
-; CHECK-DAG: leal 12(%esp), %[[y:[^ ]*]]
-; CHECK-DAG: leal 16(%esp), %[[z:[^ ]*]]
-; CHECK: pushl %[[z]]
-; CHECK: pushl %[[y]]
-; CHECK: pushl %[[x]]
-; CHECK: calll _addrof_i32_x3
-; CHECK: movl %[[csr]], %eax
-; CHECK: popl %[[csr]]
-; CHECK: retl
-
-
 define void @two_args_same_alloca(i32 %x, i32 %y) {
+; CHECK-LABEL: two_args_same_alloca:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; CHECK-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    pushl %eax
+; CHECK-NEXT:    calll _addrof_i32
+; CHECK-NEXT:    addl $4, %esp
+; CHECK-NEXT:    retl
 entry:
   %x.addr = alloca i32
   store i32 %x, i32* %x.addr
@@ -208,16 +267,17 @@ entry:
   ret void
 }
 
-; CHECK-LABEL: _two_args_same_alloca:
-; CHECK: movl 8(%esp), {{.*}}
-; CHECK: movl {{.*}}, 4(%esp)
-; CHECK: leal 4(%esp), %[[reg:[^ ]*]]
-; CHECK: pushl %[[reg]]
-; CHECK: calll _addrof_i32
-; CHECK: retl
-
-
 define void @avoid_byval(i32* byval(i32) %x) {
+; CHECK-LABEL: avoid_byval:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushl %eax
+; CHECK-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    movl %eax, (%esp)
+; CHECK-NEXT:    pushl %eax
+; CHECK-NEXT:    calll _addrof_i32
+; CHECK-NEXT:    addl $4, %esp
+; CHECK-NEXT:    popl %eax
+; CHECK-NEXT:    retl
 entry:
   %x.p.p = alloca i32*
   store i32* %x, i32** %x.p.p
@@ -225,14 +285,17 @@ entry:
   ret void
 }
 
-; CHECK-LABEL: _avoid_byval:
-; CHECK: leal {{[0-9]+}}(%esp), %[[reg:[^ ]*]]
-; CHECK: pushl %[[reg]]
-; CHECK: calll _addrof_i32
-; CHECK: retl
-
-
 define void @avoid_inalloca(i32* inalloca(i32) %x) {
+; CHECK-LABEL: avoid_inalloca:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushl %eax
+; CHECK-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    movl %eax, (%esp)
+; CHECK-NEXT:    pushl %eax
+; CHECK-NEXT:    calll _addrof_i32
+; CHECK-NEXT:    addl $4, %esp
+; CHECK-NEXT:    popl %eax
+; CHECK-NEXT:    retl
 entry:
   %x.p.p = alloca i32*
   store i32* %x, i32** %x.p.p
@@ -240,13 +303,17 @@ entry:
   ret void
 }
 
-; CHECK-LABEL: _avoid_inalloca:
-; CHECK: leal {{[0-9]+}}(%esp), %[[reg:[^ ]*]]
-; CHECK: pushl %[[reg]]
-; CHECK: calll _addrof_i32
-; CHECK: retl
-
 define void @avoid_preallocated(i32* preallocated(i32) %x) {
+; CHECK-LABEL: avoid_preallocated:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushl %eax
+; CHECK-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    movl %eax, (%esp)
+; CHECK-NEXT:    pushl %eax
+; CHECK-NEXT:    calll _addrof_i32
+; CHECK-NEXT:    addl $4, %esp
+; CHECK-NEXT:    popl %eax
+; CHECK-NEXT:    retl
 entry:
   %x.p.p = alloca i32*
   store i32* %x, i32** %x.p.p
@@ -254,14 +321,19 @@ entry:
   ret void
 }
 
-; CHECK-LABEL: _avoid_preallocated:
-; CHECK: leal {{[0-9]+}}(%esp), %[[reg:[^ ]*]]
-; CHECK: pushl %[[reg]]
-; CHECK: calll _addrof_i32
-; CHECK: retl
-
 ; Don't elide the copy when the alloca is escaped with a store.
 define void @escape_with_store(i32 %x) {
+; CHECK-LABEL: escape_with_store:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subl $8, %esp
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    movl %esp, %ecx
+; CHECK-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
+; CHECK-NEXT:    movl %eax, (%esp)
+; CHECK-NEXT:    pushl %ecx
+; CHECK-NEXT:    calll _addrof_i32
+; CHECK-NEXT:    addl $12, %esp
+; CHECK-NEXT:    retl
   %x1 = alloca i32
   %x2 = alloca i32*
   store i32* %x1, i32** %x2
@@ -272,15 +344,24 @@ define void @escape_with_store(i32 %x) {
   ret void
 }
 
-; CHECK-LABEL: _escape_with_store:
-; CHECK: movl {{.*}}(%esp), %[[reg:[^ ]*]]
-; CHECK: movl %[[reg]], [[offs:[0-9]*]](%esp)
-; CHECK: calll _addrof_i32
-
-
 ; This test case exposed issues with the use of TokenFactor.
 
 define void @sret_and_elide(i32* sret(i32) %sret, i32 %v) {
+; CHECK-LABEL: sret_and_elide:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushl %edi
+; CHECK-NEXT:    pushl %esi
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; CHECK-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    pushl %eax
+; CHECK-NEXT:    calll _addrof_i32
+; CHECK-NEXT:    addl $4, %esp
+; CHECK-NEXT:    movl %edi, (%esi)
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    popl %esi
+; CHECK-NEXT:    popl %edi
+; CHECK-NEXT:    retl
   %v.p = alloca i32
   store i32 %v, i32* %v.p
   call void @addrof_i32(i32* %v.p)
@@ -288,16 +369,20 @@ define void @sret_and_elide(i32* sret(i32) %sret, i32 %v) {
   ret void
 }
 
-; CHECK-LABEL: _sret_and_elide:
-; CHECK: pushl
-; CHECK: pushl
-; CHECK: movl 12(%esp), %[[sret:[^ ]*]]
-; CHECK: movl 16(%esp), %[[v:[^ ]*]]
-; CHECK: leal 16(%esp), %[[reg:[^ ]*]]
-; CHECK: pushl %[[reg]]
-; CHECK: calll _addrof_i32
-; CHECK: movl %[[v]], (%[[sret]])
-; CHECK: movl %[[sret]], %eax
-; CHECK: popl
-; CHECK: popl
-; CHECK: retl
+; FIXME: The argument elision is not legal in presence of irregular types.
+
+define i1 @use_i3(i3 %a1, i3 %a2) {
+; CHECK-LABEL: use_i3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movb {{[0-9]+}}(%esp), %al
+; CHECK-NEXT:    andb $7, %al
+; CHECK-NEXT:    cmpb {{[0-9]+}}(%esp), %al
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retl
+  %tmp = alloca i3, align 4
+  store i3 %a2, i3* %tmp, align 4
+  %l = load i3, i3* %tmp
+  %res = icmp eq i3 %a1, %l
+  ret i1 %res
+}
+