From: Ahmed Bougacha
Date: Thu, 7 Apr 2016 02:06:53 +0000 (+0000)
Subject: [X86] Refresh and tweak EFLAGS reuse tests. NFC.
X-Git-Tag: llvmorg-3.9.0-rc1~9778
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=70bde5445b867f4cd0e7227f454890c39b0efdb6;p=platform%2Fupstream%2Fllvm.git

[X86] Refresh and tweak EFLAGS reuse tests. NFC.

The non-1 and EQ/NE tests were misguided.

llvm-svn: 265635
---

diff --git a/llvm/test/CodeGen/X86/atomic-eflags-reuse.ll b/llvm/test/CodeGen/X86/atomic-eflags-reuse.ll
index 152d7e0..a7463d3 100644
--- a/llvm/test/CodeGen/X86/atomic-eflags-reuse.ll
+++ b/llvm/test/CodeGen/X86/atomic-eflags-reuse.ll
@@ -1,83 +1,115 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s
 
-define i8 @test_add_1_setcc_ne(i64* %p) #0 {
-; CHECK-LABEL: test_add_1_setcc_ne:
+define i32 @test_add_1_cmov_slt(i64* %p, i32 %a0, i32 %a1) #0 {
+; CHECK-LABEL: test_add_1_cmov_slt:
 ; CHECK:       # BB#0: # %entry
 ; CHECK-NEXT:    movl $1, %eax
 ; CHECK-NEXT:    lock xaddq %rax, (%rdi)
 ; CHECK-NEXT:    testq %rax, %rax
-; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    cmovnsl %edx, %esi
+; CHECK-NEXT:    movl %esi, %eax
 ; CHECK-NEXT:    retq
 entry:
   %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
-  %tmp1 = icmp ne i64 %tmp0, 0
-  %tmp2 = zext i1 %tmp1 to i8
-  ret i8 %tmp2
+  %tmp1 = icmp slt i64 %tmp0, 0
+  %tmp2 = select i1 %tmp1, i32 %a0, i32 %a1
+  ret i32 %tmp2
+}
+
+define i32 @test_add_1_cmov_sge(i64* %p, i32 %a0, i32 %a1) #0 {
+; CHECK-LABEL: test_add_1_cmov_sge:
+; CHECK:       # BB#0: # %entry
+; CHECK-NEXT:    movl $1, %eax
+; CHECK-NEXT:    lock xaddq %rax, (%rdi)
+; CHECK-NEXT:    testq %rax, %rax
+; CHECK-NEXT:    cmovsl %edx, %esi
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    retq
+entry:
+  %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
+  %tmp1 = icmp sge i64 %tmp0, 0
+  %tmp2 = select i1 %tmp1, i32 %a0, i32 %a1
+  ret i32 %tmp2
 }
 
-define i8 @test_sub_1_setcc_eq(i64* %p) #0 {
-; CHECK-LABEL: test_sub_1_setcc_eq:
+define i32 @test_sub_1_cmov_sle(i64* %p, i32 %a0, i32 %a1) #0 {
+; CHECK-LABEL: test_sub_1_cmov_sle:
 ; CHECK:       # BB#0: # %entry
 ; CHECK-NEXT:    movq $-1, %rax
 ; CHECK-NEXT:    lock xaddq %rax, (%rdi)
 ; CHECK-NEXT:    testq %rax, %rax
-; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    cmovgl %edx, %esi
+; CHECK-NEXT:    movl %esi, %eax
 ; CHECK-NEXT:    retq
 entry:
   %tmp0 = atomicrmw sub i64* %p, i64 1 seq_cst
-  %tmp1 = icmp eq i64 %tmp0, 0
-  %tmp2 = zext i1 %tmp1 to i8
-  ret i8 %tmp2
+  %tmp1 = icmp sle i64 %tmp0, 0
+  %tmp2 = select i1 %tmp1, i32 %a0, i32 %a1
+  ret i32 %tmp2
+}
+
+define i32 @test_sub_1_cmov_sgt(i64* %p, i32 %a0, i32 %a1) #0 {
+; CHECK-LABEL: test_sub_1_cmov_sgt:
+; CHECK:       # BB#0: # %entry
+; CHECK-NEXT:    movq $-1, %rax
+; CHECK-NEXT:    lock xaddq %rax, (%rdi)
+; CHECK-NEXT:    testq %rax, %rax
+; CHECK-NEXT:    cmovlel %edx, %esi
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    retq
+entry:
+  %tmp0 = atomicrmw sub i64* %p, i64 1 seq_cst
+  %tmp1 = icmp sgt i64 %tmp0, 0
+  %tmp2 = select i1 %tmp1, i32 %a0, i32 %a1
+  ret i32 %tmp2
 }
 
 ; FIXME: (setcc slt x, 0) gets combined into shr early.
-define i8 @test_add_10_setcc_slt(i64* %p) #0 {
-; CHECK-LABEL: test_add_10_setcc_slt:
+define i8 @test_add_1_setcc_slt(i64* %p) #0 {
+; CHECK-LABEL: test_add_1_setcc_slt:
 ; CHECK:       # BB#0: # %entry
-; CHECK-NEXT:    movl $10, %eax
+; CHECK-NEXT:    movl $1, %eax
 ; CHECK-NEXT:    lock xaddq %rax, (%rdi)
 ; CHECK-NEXT:    shrq $63, %rax
 ; CHECK-NEXT:    retq
 entry:
-  %tmp0 = atomicrmw add i64* %p, i64 10 seq_cst
+  %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
   %tmp1 = icmp slt i64 %tmp0, 0
   %tmp2 = zext i1 %tmp1 to i8
   ret i8 %tmp2
 }
 
-define i8 @test_sub_10_setcc_sge(i64* %p) #0 {
-; CHECK-LABEL: test_sub_10_setcc_sge:
+define i8 @test_sub_1_setcc_sgt(i64* %p) #0 {
+; CHECK-LABEL: test_sub_1_setcc_sgt:
 ; CHECK:       # BB#0: # %entry
-; CHECK-NEXT:    movq $-10, %rax
+; CHECK-NEXT:    movq $-1, %rax
 ; CHECK-NEXT:    lock xaddq %rax, (%rdi)
 ; CHECK-NEXT:    testq %rax, %rax
-; CHECK-NEXT:    setns %al
+; CHECK-NEXT:    setg %al
 ; CHECK-NEXT:    retq
 entry:
-  %tmp0 = atomicrmw sub i64* %p, i64 10 seq_cst
-  %tmp1 = icmp sge i64 %tmp0, 0
+  %tmp0 = atomicrmw sub i64* %p, i64 1 seq_cst
+  %tmp1 = icmp sgt i64 %tmp0, 0
   %tmp2 = zext i1 %tmp1 to i8
   ret i8 %tmp2
 }
 
-; Test jcc and cmov
-
-define i32 @test_add_10_brcond_sge(i64* %p, i32 %a0, i32 %a1) #0 {
-; CHECK-LABEL: test_add_10_brcond_sge:
+define i32 @test_add_1_brcond_sge(i64* %p, i32 %a0, i32 %a1) #0 {
+; CHECK-LABEL: test_add_1_brcond_sge:
 ; CHECK:       # BB#0: # %entry
-; CHECK-NEXT:    movl $10, %eax
+; CHECK-NEXT:    movl $1, %eax
 ; CHECK-NEXT:    lock xaddq %rax, (%rdi)
 ; CHECK-NEXT:    testq %rax, %rax
-; CHECK-NEXT:    js .LBB4_2
+; CHECK-NEXT:    js .LBB6_2
 ; CHECK-NEXT:  # BB#1: # %t
 ; CHECK-NEXT:    movl %esi, %eax
 ; CHECK-NEXT:    retq
-; CHECK-NEXT:  .LBB4_2: # %f
+; CHECK-NEXT:  .LBB6_2: # %f
 ; CHECK-NEXT:    movl %edx, %eax
 ; CHECK-NEXT:    retq
 entry:
-  %tmp0 = atomicrmw add i64* %p, i64 10 seq_cst
+  %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
   %tmp1 = icmp sge i64 %tmp0, 0
   br i1 %tmp1, label %t, label %f
 t:
@@ -86,22 +118,6 @@ f:
   ret i32 %a1
 }
 
-define i32 @test_sub_1_cmov_slt(i64* %p, i32 %a0, i32 %a1) #0 {
-; CHECK-LABEL: test_sub_1_cmov_slt:
-; CHECK:       # BB#0: # %entry
-; CHECK-NEXT:    movq $-1, %rax
-; CHECK-NEXT:    lock xaddq %rax, (%rdi)
-; CHECK-NEXT:    testq %rax, %rax
-; CHECK-NEXT:    cmovnsl %edx, %esi
-; CHECK-NEXT:    movl %esi, %eax
-; CHECK-NEXT:    retq
-entry:
-  %tmp0 = atomicrmw sub i64* %p, i64 1 seq_cst
-  %tmp1 = icmp slt i64 %tmp0, 0
-  %tmp2 = select i1 %tmp1, i32 %a0, i32 %a1
-  ret i32 %tmp2
-}
-
 ; Also make sure we don't muck with condition codes that we should ignore.
 ; No need to test unsigned comparisons, as they should all be simplified.
 
@@ -139,21 +155,36 @@
 ; Test a result being used by more than just the comparison.
-define i8 @test_add_1_setcc_ne_reuse(i64* %p, i64* %p2) #0 {
-; CHECK-LABEL: test_add_1_setcc_ne_reuse:
+define i8 @test_add_1_setcc_sgt_reuse(i64* %p, i64* %p2) #0 {
+; CHECK-LABEL: test_add_1_setcc_sgt_reuse:
 ; CHECK:       # BB#0: # %entry
 ; CHECK-NEXT:    movl $1, %ecx
 ; CHECK-NEXT:    lock xaddq %rcx, (%rdi)
 ; CHECK-NEXT:    testq %rcx, %rcx
-; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    setg %al
 ; CHECK-NEXT:    movq %rcx, (%rsi)
 ; CHECK-NEXT:    retq
 entry:
   %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
-  %tmp1 = icmp ne i64 %tmp0, 0
+  %tmp1 = icmp sgt i64 %tmp0, 0
   %tmp2 = zext i1 %tmp1 to i8
   store i64 %tmp0, i64* %p2
   ret i8 %tmp2
 }
 
+define i8 @test_sub_2_setcc_sgt(i64* %p) #0 {
+; CHECK-LABEL: test_sub_2_setcc_sgt:
+; CHECK:       # BB#0: # %entry
+; CHECK-NEXT:    movq $-2, %rax
+; CHECK-NEXT:    lock xaddq %rax, (%rdi)
+; CHECK-NEXT:    testq %rax, %rax
+; CHECK-NEXT:    setg %al
+; CHECK-NEXT:    retq
+entry:
+  %tmp0 = atomicrmw sub i64* %p, i64 2 seq_cst
+  %tmp1 = icmp sgt i64 %tmp0, 0
+  %tmp2 = zext i1 %tmp1 to i8
+  ret i8 %tmp2
+}
+
 attributes #0 = { nounwind }
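
Background for "the non-1 and EQ/NE tests were misguided" (a sketch of the
reasoning, not part of the commit): the EFLAGS left by a LOCKed increment
describe old+1, not old, so a signed comparison of the atomicrmw result
against zero can only be folded into those flags when the predicate maps
onto a condition code of old+1. In exact arithmetic (OF keeps the mapping
valid across overflow):

  old <  0   <=>   old+1  <= 0   (condition LE)
  old >= 0   <=>   old+1  >  0   (condition G)
  old == 0   <=>   old+1  == 1   (no condition code)
  old <  0   <=>   old+10 <= 9   for an add of 10 (no condition code)

which is why only the +/-1, sign-predicate tests remain, and why the new
test_sub_2_setcc_sgt exercises a non-1 operand that must be left alone.
A minimal .ll sketch of a foldable shape follows; the function name is
illustrative only:

; Sketch only, not part of the test file: "icmp slt %old, 0" after
; "atomicrmw add ..., 1" is the shape a combine could rewrite in terms of
; the flags of the LOCKed add (condition LE, per the table above). As the
; FIXME in the diff notes, the setcc form is currently turned into
; "shrq $63" early, so the cmov and brcond variants carry the coverage.
define i8 @sketch_add_1_slt(i64* %p) nounwind {
entry:
  %old = atomicrmw add i64* %p, i64 1 seq_cst
  %cmp = icmp slt i64 %old, 0
  %res = zext i1 %cmp to i8
  ret i8 %res
}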