From caf5f5c490a696f1e730042c45b568500fabbad8 Mon Sep 17 00:00:00 2001
From: Sanjay Patel
Date: Tue, 30 Oct 2018 20:46:23 +0000
Subject: [PATCH] [x86] try to make test immune to better div optimization; NFCI

llvm-svn: 345642
---
 llvm/test/CodeGen/X86/copy-eflags.ll | 43 +++++++++++++++---------------------
 1 file changed, 18 insertions(+), 25 deletions(-)

diff --git a/llvm/test/CodeGen/X86/copy-eflags.ll b/llvm/test/CodeGen/X86/copy-eflags.ll
index 836027f..1e9a598 100644
--- a/llvm/test/CodeGen/X86/copy-eflags.ll
+++ b/llvm/test/CodeGen/X86/copy-eflags.ll
@@ -200,45 +200,37 @@ else:
 ; Test a function that gets special select lowering into CFG with copied EFLAGS
 ; threaded across the CFG. This requires our EFLAGS copy rewriting to handle
 ; cross-block rewrites in at least some narrow cases.
-define void @PR37100(i8 %arg1, i16 %arg2, i64 %arg3, i8 %arg4, i8* %ptr1, i32* %ptr2) {
+define void @PR37100(i8 %arg1, i16 %arg2, i64 %arg3, i8 %arg4, i8* %ptr1, i32* %ptr2, i32 %x) nounwind {
 ; X32-LABEL: PR37100:
 ; X32: # %bb.0: # %bb
 ; X32-NEXT: pushl %ebp
-; X32-NEXT: .cfi_def_cfa_offset 8
 ; X32-NEXT: pushl %ebx
-; X32-NEXT: .cfi_def_cfa_offset 12
 ; X32-NEXT: pushl %edi
-; X32-NEXT: .cfi_def_cfa_offset 16
 ; X32-NEXT: pushl %esi
-; X32-NEXT: .cfi_def_cfa_offset 20
-; X32-NEXT: .cfi_offset %esi, -20
-; X32-NEXT: .cfi_offset %edi, -16
-; X32-NEXT: .cfi_offset %ebx, -12
-; X32-NEXT: .cfi_offset %ebp, -8
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebp
 ; X32-NEXT: movb {{[0-9]+}}(%esp), %ch
 ; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
 ; X32-NEXT: jmp .LBB3_1
 ; X32-NEXT: .p2align 4, 0x90
 ; X32-NEXT: .LBB3_5: # %bb1
 ; X32-NEXT: # in Loop: Header=BB3_1 Depth=1
-; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: idivl %ebp
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: cltd
+; X32-NEXT: idivl %edi
 ; X32-NEXT: .LBB3_1: # %bb1
 ; X32-NEXT: # =>This Inner Loop Header: Depth=1
 ; X32-NEXT: movsbl %cl, %eax
 ; X32-NEXT: movl %eax, %edx
 ; X32-NEXT: sarl $31, %edx
-; X32-NEXT: cmpl %eax, %esi
+; X32-NEXT: cmpl %eax, {{[0-9]+}}(%esp)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: sbbl %edx, %eax
 ; X32-NEXT: setl %al
 ; X32-NEXT: setl %dl
-; X32-NEXT: movzbl %dl, %ebp
-; X32-NEXT: negl %ebp
+; X32-NEXT: movzbl %dl, %edi
+; X32-NEXT: negl %edi
 ; X32-NEXT: testb %al, %al
 ; X32-NEXT: jne .LBB3_3
 ; X32-NEXT: # %bb.2: # %bb1
@@ -246,33 +238,34 @@ define void @PR37100(i8 %arg1, i16 %arg2, i64 %arg3, i8 %arg4, i8* %ptr1, i32* %
 ; X32-NEXT: movb %ch, %cl
 ; X32-NEXT: .LBB3_3: # %bb1
 ; X32-NEXT: # in Loop: Header=BB3_1 Depth=1
-; X32-NEXT: movb %cl, (%ebx)
-; X32-NEXT: movl (%edi), %edx
+; X32-NEXT: movb %cl, (%ebp)
+; X32-NEXT: movl (%ebx), %edx
 ; X32-NEXT: testb %al, %al
 ; X32-NEXT: jne .LBB3_5
 ; X32-NEXT: # %bb.4: # %bb1
 ; X32-NEXT: # in Loop: Header=BB3_1 Depth=1
-; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: movl %edx, %edi
 ; X32-NEXT: jmp .LBB3_5
 ;
 ; X64-LABEL: PR37100:
 ; X64: # %bb.0: # %bb
-; X64-NEXT: movq %rdx, %r10
+; X64-NEXT: movq %rdx, %r11
+; X64-NEXT: movl {{[0-9]+}}(%rsp), %r10d
 ; X64-NEXT: jmp .LBB3_1
 ; X64-NEXT: .p2align 4, 0x90
 ; X64-NEXT: .LBB3_5: # %bb1
 ; X64-NEXT: # in Loop: Header=BB3_1 Depth=1
-; X64-NEXT: xorl %eax, %eax
-; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: movl %r10d, %eax
+; X64-NEXT: cltd
 ; X64-NEXT: idivl %esi
 ; X64-NEXT: .LBB3_1: # %bb1
 ; X64-NEXT: # =>This Inner Loop Header: Depth=1
 ; X64-NEXT: movsbq %dil, %rax
 ; X64-NEXT: xorl %esi, %esi
-; X64-NEXT: cmpq %rax, %r10
+; X64-NEXT: cmpq %rax, %r11
 ; X64-NEXT: setl %sil
 ; X64-NEXT: negl %esi
-; X64-NEXT: cmpq %rax, %r10
+; X64-NEXT: cmpq %rax, %r11
 ; X64-NEXT: jl .LBB3_3
 ; X64-NEXT: # %bb.2: # %bb1
 ; X64-NEXT: # in Loop: Header=BB3_1 Depth=1
@@ -300,7 +293,7 @@ bb1:
   store volatile i8 %tmp8, i8* %ptr1
   %tmp9 = load volatile i32, i32* %ptr2
   %tmp10 = select i1 %tmp6, i32 %tmp7, i32 %tmp9
-  %tmp11 = srem i32 0, %tmp10
+  %tmp11 = srem i32 %x, %tmp10
   %tmp12 = trunc i32 %tmp11 to i16
   br label %bb1
 }
-- 
2.7.4
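
Why this change keeps the test meaningful: the old body computed %tmp11 = srem i32 0, %tmp10, and a signed remainder with a literal zero dividend is always zero (a zero divisor is undefined behavior in LLVM IR, so the divisor may be assumed nonzero). A sufficiently smart optimizer is therefore free to fold the whole srem to the constant 0 and delete the idivl that this test relies on to exercise cross-block EFLAGS-copy rewriting. A minimal sketch of that fold in LLVM IR, illustrative only and not part of the patch (the function name is invented for the example):

; Not from the patch; shows why a constant-zero dividend is fragile in a test.
define i32 @srem_zero_dividend(i32 %d) {
  ; 0 srem %d is 0 for every nonzero %d, and %d == 0 would be UB,
  ; so instcombine may simplify %r to the constant 0, removing the
  ; division (and its idiv lowering) entirely.
  %r = srem i32 0, %d
  ret i32 %r
}

Threading the new %x argument through as the dividend makes the value opaque to the optimizer, so a real division must be emitted (movl %esi, %eax; cltd; idivl %edi on x86-32 in the updated checks). The added nounwind attribute is what drops the .cfi_* directives from the expected assembly, which shortens the CHECK lines without changing what the test verifies.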