From 0496477c5dc23667d7b49625a71f8639298fb2b1 Mon Sep 17 00:00:00 2001
From: Roman Lebedev
Date: Sun, 30 Sep 2018 12:42:08 +0000
Subject: [PATCH] [NFC][CodeGen][X86][AArch64] Add 64-bit constant bit field
 extract pattern tests

llvm-svn: 343404
---
 llvm/test/CodeGen/AArch64/extract-bits.ll |  50 ++++++++++++
 llvm/test/CodeGen/X86/extract-bits.ll     | 124 ++++++++++++++++++++++++++++++
 2 files changed, 174 insertions(+)

diff --git a/llvm/test/CodeGen/AArch64/extract-bits.ll b/llvm/test/CodeGen/AArch64/extract-bits.ll
index a1eeee5..a60883b 100644
--- a/llvm/test/CodeGen/AArch64/extract-bits.ll
+++ b/llvm/test/CodeGen/AArch64/extract-bits.ll
@@ -788,3 +788,53 @@ define i32 @c4_i32_bad(i32 %arg) {
   %tmp1 = and i32 %tmp0, 16382
   ret i32 %tmp1
 }
+
+; i64
+
+; The most canonical variant
+define i64 @c0_i64(i64 %arg) {
+; CHECK-LABEL: c0_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ubfx x0, x0, #51, #10
+; CHECK-NEXT:    ret
+  %tmp0 = lshr i64 %arg, 51
+  %tmp1 = and i64 %tmp0, 1023
+  ret i64 %tmp1
+}
+
+; Should be still fine, but the mask is shifted
+define i64 @c1_i64(i64 %arg) {
+; CHECK-LABEL: c1_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    lsr x8, x0, #51
+; CHECK-NEXT:    and x0, x8, #0xffc
+; CHECK-NEXT:    ret
+  %tmp0 = lshr i64 %arg, 51
+  %tmp1 = and i64 %tmp0, 4092
+  ret i64 %tmp1
+}
+
+; Should be still fine, but the result is shifted left afterwards
+define i64 @c2_i64(i64 %arg) {
+; CHECK-LABEL: c2_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ubfx x8, x0, #51, #10
+; CHECK-NEXT:    lsl x0, x8, #2
+; CHECK-NEXT:    ret
+  %tmp0 = lshr i64 %arg, 51
+  %tmp1 = and i64 %tmp0, 1023
+  %tmp2 = shl i64 %tmp1, 2
+  ret i64 %tmp2
+}
+
+; The mask covers newly shifted-in bit
+define i64 @c4_i64_bad(i64 %arg) {
+; CHECK-LABEL: c4_i64_bad:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    lsr x8, x0, #51
+; CHECK-NEXT:    and x0, x8, #0x1ffe
+; CHECK-NEXT:    ret
+  %tmp0 = lshr i64 %arg, 51
+  %tmp1 = and i64 %tmp0, 16382
+  ret i64 %tmp1
+}
diff --git a/llvm/test/CodeGen/X86/extract-bits.ll b/llvm/test/CodeGen/X86/extract-bits.ll
index e9a3cec..6ee5b4a 100644
--- a/llvm/test/CodeGen/X86/extract-bits.ll
+++ b/llvm/test/CodeGen/X86/extract-bits.ll
@@ -5756,3 +5756,127 @@ define i32 @c4_i32_bad(i32 %arg) {
   %tmp1 = and i32 %tmp0, 16382
   ret i32 %tmp1
 }
+
+; i64
+
+; The most canonical variant
+define i64 @c0_i64(i64 %arg) {
+; X86-NOBMI-LABEL: c0_i64:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    shrl $19, %eax
+; X86-NOBMI-NEXT:    andl $1023, %eax # imm = 0x3FF
+; X86-NOBMI-NEXT:    xorl %edx, %edx
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1NOTBM-LABEL: c0_i64:
+; X86-BMI1NOTBM:       # %bb.0:
+; X86-BMI1NOTBM-NEXT:    movl $2579, %eax # imm = 0xA13
+; X86-BMI1NOTBM-NEXT:    bextrl %eax, {{[0-9]+}}(%esp), %eax
+; X86-BMI1NOTBM-NEXT:    xorl %edx, %edx
+; X86-BMI1NOTBM-NEXT:    retl
+;
+; X86-BMI1TBM-LABEL: c0_i64:
+; X86-BMI1TBM:       # %bb.0:
+; X86-BMI1TBM-NEXT:    bextrl $2579, {{[0-9]+}}(%esp), %eax # imm = 0xA13
+; X86-BMI1TBM-NEXT:    xorl %edx, %edx
+; X86-BMI1TBM-NEXT:    retl
+;
+; X86-BMI1NOTBMBMI2-LABEL: c0_i64:
+; X86-BMI1NOTBMBMI2:       # %bb.0:
+; X86-BMI1NOTBMBMI2-NEXT:    movl $2579, %eax # imm = 0xA13
+; X86-BMI1NOTBMBMI2-NEXT:    bextrl %eax, {{[0-9]+}}(%esp), %eax
+; X86-BMI1NOTBMBMI2-NEXT:    xorl %edx, %edx
+; X86-BMI1NOTBMBMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: c0_i64:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movq %rdi, %rax
+; X64-NOBMI-NEXT:    shrq $51, %rax
+; X64-NOBMI-NEXT:    andl $1023, %eax # imm = 0x3FF
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1NOTBM-LABEL: c0_i64:
+; X64-BMI1NOTBM:       # %bb.0:
+; X64-BMI1NOTBM-NEXT:    movl $2611, %eax # imm = 0xA33
+; X64-BMI1NOTBM-NEXT:    bextrq %rax, %rdi, %rax
+; X64-BMI1NOTBM-NEXT:    retq
+;
+; X64-BMI1TBM-LABEL: c0_i64:
+; X64-BMI1TBM:       # %bb.0:
+; X64-BMI1TBM-NEXT:    bextrq $2611, %rdi, %rax # imm = 0xA33
+; X64-BMI1TBM-NEXT:    retq
+;
+; X64-BMI1NOTBMBMI2-LABEL: c0_i64:
+; X64-BMI1NOTBMBMI2:       # %bb.0:
+; X64-BMI1NOTBMBMI2-NEXT:    movl $2611, %eax # imm = 0xA33
+; X64-BMI1NOTBMBMI2-NEXT:    bextrq %rax, %rdi, %rax
+; X64-BMI1NOTBMBMI2-NEXT:    retq
+  %tmp0 = lshr i64 %arg, 51
+  %tmp1 = and i64 %tmp0, 1023
+  ret i64 %tmp1
+}
+
+; Should be still fine, but the mask is shifted
+define i64 @c1_i64(i64 %arg) {
+; X86-LABEL: c1_i64:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    shrl $19, %eax
+; X86-NEXT:    andl $4092, %eax # imm = 0xFFC
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: c1_i64:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    shrq $51, %rax
+; X64-NEXT:    andl $4092, %eax # imm = 0xFFC
+; X64-NEXT:    retq
+  %tmp0 = lshr i64 %arg, 51
+  %tmp1 = and i64 %tmp0, 4092
+  ret i64 %tmp1
+}
+
+; Should be still fine, but the result is shifted left afterwards
+define i64 @c2_i64(i64 %arg) {
+; X86-LABEL: c2_i64:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    shrl $17, %eax
+; X86-NEXT:    andl $4092, %eax # imm = 0xFFC
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: c2_i64:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    shrq $49, %rax
+; X64-NEXT:    andl $4092, %eax # imm = 0xFFC
+; X64-NEXT:    retq
+  %tmp0 = lshr i64 %arg, 51
+  %tmp1 = and i64 %tmp0, 1023
+  %tmp2 = shl i64 %tmp1, 2
+  ret i64 %tmp2
+}
+
+; The mask covers newly shifted-in bit
+define i64 @c4_i64_bad(i64 %arg) {
+; X86-LABEL: c4_i64_bad:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    shrl $19, %eax
+; X86-NEXT:    andl $-2, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: c4_i64_bad:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    shrq $51, %rax
+; X64-NEXT:    andl $-2, %eax
+; X64-NEXT:    retq
+  %tmp0 = lshr i64 %arg, 51
+  %tmp1 = and i64 %tmp0, 16382
+  ret i64 %tmp1
+}
-- 
2.7.4