From: Sanjay Patel
Date: Tue, 30 Oct 2018 20:42:03 +0000 (+0000)
Subject: [x86] try to make test immune to better div optimization; NFCI
X-Git-Tag: llvmorg-8.0.0-rc1~5442
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=059ebc90ee43a3e38503712fb3bbd3a2fd7971e1;p=platform%2Fupstream%2Fllvm.git

[x86] try to make test immune to better div optimization; NFCI

llvm-svn: 345639
---

diff --git a/llvm/test/CodeGen/X86/pr32282.ll b/llvm/test/CodeGen/X86/pr32282.ll
index 7ec6a5d..6b08e2f 100644
--- a/llvm/test/CodeGen/X86/pr32282.ll
+++ b/llvm/test/CodeGen/X86/pr32282.ll
@@ -9,11 +9,10 @@
 @d = common global i64 zeroinitializer, align 8
 @e = common global i64 zeroinitializer, align 8
 
-define void @foo() {
+define void @foo(i64 %x) nounwind {
 ; X86-LABEL: foo:
 ; X86:       # %bb.0:
 ; X86-NEXT:    pushl %eax
-; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    movl d, %eax
 ; X86-NEXT:    notl %eax
 ; X86-NEXT:    movl d+4, %ecx
@@ -26,40 +25,35 @@ define void @foo() {
 ; X86-NEXT:    addl $7, %eax
 ; X86-NEXT:    adcl $0, %ecx
 ; X86-NEXT:    pushl %ecx
-; X86-NEXT:    .cfi_adjust_cfa_offset 4
 ; X86-NEXT:    pushl %eax
-; X86-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NEXT:    pushl $0
-; X86-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NEXT:    pushl $0
-; X86-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NEXT:    pushl {{[0-9]+}}(%esp)
 ; X86-NEXT:    calll __divdi3
 ; X86-NEXT:    addl $16, %esp
-; X86-NEXT:    .cfi_adjust_cfa_offset -16
 ; X86-NEXT:    orl %eax, %edx
 ; X86-NEXT:    setne {{[0-9]+}}(%esp)
 ; X86-NEXT:    popl %eax
-; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: foo:
 ; X64:       # %bb.0:
-; X64-NEXT:    movq {{.*}}(%rip), %rax
-; X64-NEXT:    movabsq $3013716102212485120, %rcx # imm = 0x29D2DED3DE400000
-; X64-NEXT:    andnq %rcx, %rax, %rcx
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    movq {{.*}}(%rip), %rcx
+; X64-NEXT:    movabsq $3013716102212485120, %rdx # imm = 0x29D2DED3DE400000
+; X64-NEXT:    andnq %rdx, %rcx, %rcx
 ; X64-NEXT:    shrq $21, %rcx
 ; X64-NEXT:    addq $7, %rcx
-; X64-NEXT:    movabsq $4393751543808, %rax # imm = 0x3FF00000000
-; X64-NEXT:    testq %rax, %rcx
+; X64-NEXT:    movq %rdi, %rdx
+; X64-NEXT:    orq %rcx, %rdx
+; X64-NEXT:    shrq $32, %rdx
 ; X64-NEXT:    je .LBB0_1
 ; X64-NEXT:  # %bb.2:
-; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    xorl %edx, %edx
-; X64-NEXT:    divq %rcx
+; X64-NEXT:    cqto
+; X64-NEXT:    idivq %rcx
 ; X64-NEXT:    jmp .LBB0_3
 ; X64-NEXT:  .LBB0_1:
-; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-NEXT:    divl %ecx
 ; X64-NEXT:    # kill: def $eax killed $eax def $rax
 ; X64-NEXT:  .LBB0_3:
@@ -86,7 +80,7 @@ define void @foo() {
   %18 = ashr i64 %4, %17
   %19 = and i64 %18, 9223372036854775806
   %20 = add nsw i64 7, %19
-  %21 = sdiv i64 0, %20
+  %21 = sdiv i64 %x, %20
   %22 = icmp ne i64 %21, 0
   %23 = zext i1 %22 to i8
   store i8 %23, i8* %1, align 1
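
As a sketch of why the old form of the test was fragile (the IR below is
illustrative, not part of the commit): 0 sdiv %n simplifies to 0 for any
non-zero %n, and division by zero is undefined, so a stronger div
optimization could fold a constant-zero dividend away entirely and leave
nothing for the X86/X64 check lines above to match.

; Illustrative only, not from the patch: a constant-zero dividend is
; trivially foldable, so the divide (and the __divdi3 libcall on x86-32)
; can vanish under a better combine.
define i64 @zero_dividend(i64 %n) {
  %q = sdiv i64 0, %n   ; simplifies to 0 (0 sdiv n == 0 for n != 0)
  ret i64 %q
}

; Passing the dividend in as an argument, as the updated test does with
; %x, keeps the value opaque so the division must still be emitted.
define i64 @opaque_dividend(i64 %x, i64 %n) {
  %q = sdiv i64 %x, %n  ; not foldable: %x is unknown at compile time
  ret i64 %q
}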