As detailed in https://blog.regehr.org/archives/1709, we currently fail to make use of a value's known leading/trailing zero bits when it is shifted by an unknown amount.
This patch teaches SelectionDAG::computeKnownBits to use KnownBits::countMinTrailingZeros and KnownBits::countMinLeadingZeros to set the minimum guaranteed trailing (for SHL) and leading (for SRL) known zero bits of the result.
Differential Revision: https://reviews.llvm.org/D72573
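
To make the intuition concrete, here is a minimal standalone sketch (plain C++, independent of the LLVM APIs; the helper functions are illustrative only) of the invariant the patch exploits: no matter how far a value is shifted left, its guaranteed trailing zeros stay zero, and no matter how far it is logically shifted right, its guaranteed leading zeros stay zero.

#include <cassert>
#include <cstdint>

// Hypothetical helpers for illustration; LLVM itself uses APInt/KnownBits.
static unsigned countTrailingZeros(uint32_t X) {
  if (X == 0) return 32;
  unsigned N = 0;
  while (((X >> N) & 1) == 0) ++N;
  return N;
}

static unsigned countLeadingZeros(uint32_t X) {
  if (X == 0) return 32;
  unsigned N = 0;
  while (((X >> (31 - N)) & 1) == 0) ++N;
  return N;
}

int main() {
  const uint32_t X = 0x00ABCD00; // 8 known trailing and 8 known leading zeros
  for (unsigned Shift = 0; Shift < 32; ++Shift) {
    // SHL: the trailing zeros can only grow, never shrink.
    assert(countTrailingZeros(X << Shift) >= countTrailingZeros(X));
    // SRL: the leading zeros can only grow, never shrink.
    assert(countLeadingZeros(X >> Shift) >= countLeadingZeros(X));
  }
  return 0;
}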
Known.One <<= Shift;
// Low bits are known zero.
Known.Zero.setLowBits(Shift);
+ } else {
+ // No matter the shift amount, the trailing zeros will stay zero.
+ Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+ Known.Zero =
+ APInt::getLowBitsSet(BitWidth, Known.countMinTrailingZeros());
+ Known.One.clearAllBits();
}
break;
case ISD::SRL:
} else if (const APInt *ShMinAmt = getValidMinimumShiftAmountConstant(Op)) {
// Minimum shift high bits are known zero.
Known.Zero.setHighBits(ShMinAmt->getZExtValue());
+ } else {
+ // No matter the shift amount, the leading zeros will stay zero.
+ Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+ Known.Zero = APInt::getHighBitsSet(BitWidth, Known.countMinLeadingZeros());
+ Known.One.clearAllBits();
}
break;
case ISD::SRA:
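
For reference, a simplified model of what the two new else branches compute, using a hypothetical 32-bit stand-in for llvm::KnownBits (the Known32 type, its countMin* loops, and the mask math are assumptions made for this sketch; the in-tree code uses APInt::getLowBitsSet/getHighBitsSet on the real KnownBits struct):

#include <cstdint>

// Toy 32-bit stand-in for llvm::KnownBits (hypothetical, illustration only).
struct Known32 {
  uint32_t Zero = 0; // bits known to be 0
  uint32_t One = 0;  // bits known to be 1

  // Number of low bits guaranteed zero: trailing ones of the Zero mask.
  unsigned countMinTrailingZeros() const {
    unsigned N = 0;
    while (N < 32 && ((Zero >> N) & 1)) ++N;
    return N;
  }
  // Number of high bits guaranteed zero: leading ones of the Zero mask.
  unsigned countMinLeadingZeros() const {
    unsigned N = 0;
    while (N < 32 && ((Zero >> (31 - N)) & 1)) ++N;
    return N;
  }
};

// Mirrors the new SHL else branch: whatever the shift amount, the operand's
// guaranteed trailing zeros stay zero; no bit is known to be one.
Known32 shlAnyAmount(Known32 Src) {
  Known32 K;
  unsigned TZ = Src.countMinTrailingZeros();
  K.Zero = TZ >= 32 ? ~0u : ((1u << TZ) - 1); // like APInt::getLowBitsSet
  K.One = 0;                                  // like Known.One.clearAllBits()
  return K;
}

// Mirrors the new SRL else branch, with leading zeros instead.
Known32 srlAnyAmount(Known32 Src) {
  Known32 K;
  unsigned LZ = Src.countMinLeadingZeros();
  K.Zero = LZ >= 32 ? ~0u : ~((~0u) >> LZ);   // like APInt::getHighBitsSet
  K.One = 0;
  return K;
}

int main() {
  Known32 Src;
  Src.Zero = 0x000000FF; // operand's low 8 bits are known zero
  Known32 K = shlAnyAmount(Src);
  // K.Zero == 0xFF: after any left shift the low 8 bits are still zero.
  return K.Zero == 0xFFu ? 0 : 1;
}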
; CHECK-LABEL: scalar_i32_x_is_const2_eq:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #1
-; CHECK-NEXT: mov w9, #43605
; CHECK-NEXT: lsr w8, w8, w0
-; CHECK-NEXT: movk w9, #43605, lsl #16
-; CHECK-NEXT: tst w8, w9
+; CHECK-NEXT: cmp w8, #0 // =0
; CHECK-NEXT: cset w0, eq
; CHECK-NEXT: ret
%t0 = lshr i32 1, %y
; CHECK-NEXT: mov w8, #24
; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: lsr w8, w8, w1
-; CHECK-NEXT: and w8, w8, w0
-; CHECK-NEXT: sxtb w8, w8
-; CHECK-NEXT: cmp w8, #0 // =0
+; CHECK-NEXT: tst w8, w0
; CHECK-NEXT: cset w0, lt
; CHECK-NEXT: ret
%t0 = lshr i8 24, %y
; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: lsr w8, w8, w1
; CHECK-NEXT: and w8, w8, w0
-; CHECK-NEXT: and w8, w8, #0xff
; CHECK-NEXT: cmp w8, #1 // =1
; CHECK-NEXT: cset w0, eq
; CHECK-NEXT: ret
; CHECK-NEXT: and w8, w0, #0xff
; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: lsr w8, w8, w1
-; CHECK-NEXT: ubfx w0, w8, #7, #1
+; CHECK-NEXT: lsr w0, w8, #7
; CHECK-NEXT: ret
%t0 = shl i8 128, %y
%t1 = and i8 %t0, %x
; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: lsl w8, w8, w1
; CHECK-NEXT: and w8, w8, w0
-; CHECK-NEXT: and w8, w8, #0xff
+; CHECK-NEXT: and w8, w8, #0x80
; CHECK-NEXT: cmp w8, #1 // =1
; CHECK-NEXT: cset w0, eq
; CHECK-NEXT: ret
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,CIVI %s
-; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI,CIVI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CIVI,VI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CIVI,CI %s
define amdgpu_kernel void @s_lshr_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %lhs, <2 x i16> %rhs) #0 {
; GFX9-LABEL: s_lshr_v2i16:
; VI-NEXT: s_load_dword s0, s[0:1], 0x30
; VI-NEXT: s_mov_b32 s4, 0xffff
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_and_b32 s1, s5, s4
-; VI-NEXT: s_and_b32 s4, s0, s4
-; VI-NEXT: s_lshr_b32 s5, s5, 16
-; VI-NEXT: s_lshr_b32 s0, s0, 16
-; VI-NEXT: s_lshr_b32 s0, s5, s0
-; VI-NEXT: v_mov_b32_e32 v0, s4
-; VI-NEXT: v_bfe_u32 v0, s1, v0, 16
-; VI-NEXT: s_lshl_b32 s0, s0, 16
-; VI-NEXT: v_or_b32_e32 v2, s0, v0
; VI-NEXT: v_mov_b32_e32 v0, s2
+; VI-NEXT: s_lshr_b32 s1, s5, 16
+; VI-NEXT: s_lshr_b32 s6, s0, 16
+; VI-NEXT: s_lshr_b32 s1, s1, s6
+; VI-NEXT: s_and_b32 s5, s5, s4
+; VI-NEXT: s_and_b32 s0, s0, s4
+; VI-NEXT: s_lshr_b32 s0, s5, s0
+; VI-NEXT: s_lshl_b32 s1, s1, 16
+; VI-NEXT: s_or_b32 s0, s0, s1
; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_lshr_b32 s1, s2, 16
; CI-NEXT: s_lshr_b32 s8, s0, 16
+; CI-NEXT: s_lshr_b32 s1, s1, s8
+; CI-NEXT: s_and_b32 s2, s2, s3
; CI-NEXT: s_and_b32 s0, s0, s3
+; CI-NEXT: s_lshr_b32 s0, s2, s0
+; CI-NEXT: s_lshl_b32 s1, s1, 16
+; CI-NEXT: s_or_b32 s0, s0, s1
; CI-NEXT: v_mov_b32_e32 v0, s0
-; CI-NEXT: s_lshr_b32 s0, s1, s8
-; CI-NEXT: s_and_b32 s2, s2, s3
-; CI-NEXT: v_bfe_u32 v0, s2, v0, 16
-; CI-NEXT: s_lshl_b32 s0, s0, 16
-; CI-NEXT: v_or_b32_e32 v0, s0, v0
; CI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; CI-NEXT: s_endpgm
%result = lshr <2 x i16> %lhs, %rhs
; CI-NEXT: v_lshrrev_b32_e32 v5, 16, v3
; CI-NEXT: v_and_b32_e32 v2, s8, v2
; CI-NEXT: v_and_b32_e32 v3, s8, v3
-; CI-NEXT: v_bfe_u32 v2, v2, v3, 16
+; CI-NEXT: v_lshrrev_b32_e32 v2, v3, v2
; CI-NEXT: v_lshrrev_b32_e32 v3, v5, v4
; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; CI-NEXT: v_or_b32_e32 v2, v2, v3
; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; CI-NEXT: v_and_b32_e32 v2, s10, v2
; CI-NEXT: v_lshrrev_b32_e32 v3, s9, v3
-; CI-NEXT: v_bfe_u32 v2, v2, s8, 16
+; CI-NEXT: v_lshrrev_b32_e32 v2, s8, v2
; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; CI-NEXT: v_or_b32_e32 v2, v2, v3
; CI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; CI-NEXT: v_and_b32_e32 v2, s10, v2
; CI-NEXT: v_lshr_b32_e32 v3, s9, v3
-; CI-NEXT: v_bfe_u32 v2, s8, v2, 16
+; CI-NEXT: v_lshr_b32_e32 v2, s8, v2
; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; CI-NEXT: v_or_b32_e32 v2, v2, v3
; CI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; CI-NEXT: v_and_b32_e32 v2, 0xffff, v2
; CI-NEXT: v_lshr_b32_e32 v3, 8, v3
-; CI-NEXT: v_bfe_u32 v2, 8, v2, 16
+; CI-NEXT: v_lshr_b32_e32 v2, 8, v2
; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; CI-NEXT: v_or_b32_e32 v2, v2, v3
; CI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; CI-NEXT: v_and_b32_e32 v4, s8, v4
; CI-NEXT: v_and_b32_e32 v3, s8, v3
; CI-NEXT: v_and_b32_e32 v5, s8, v5
-; CI-NEXT: v_bfe_u32 v3, v3, v5, 16
+; CI-NEXT: v_lshrrev_b32_e32 v3, v5, v3
; CI-NEXT: v_lshrrev_b32_e32 v5, v9, v7
-; CI-NEXT: v_bfe_u32 v2, v2, v4, 16
+; CI-NEXT: v_lshrrev_b32_e32 v2, v4, v2
; CI-NEXT: v_lshrrev_b32_e32 v4, v8, v6
; CI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; CI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
;
; EG-LABEL: s_shl_inline_imm_1_i64:
; EG: ; %bb.0:
-; EG-NEXT: ALU 13, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT: ALU 8, @4, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: ALU clause starting at 4:
-; EG-NEXT: SUB_INT * T0.W, literal.x, KC0[2].W,
-; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT: LSHR T0.W, 1, PV.W,
-; EG-NEXT: ADD_INT * T1.W, KC0[2].W, literal.x,
-; EG-NEXT: -32(nan), 0(0.000000e+00)
-; EG-NEXT: LSHL T0.Z, 1, PS,
-; EG-NEXT: LSHR T0.W, PV.W, 1,
-; EG-NEXT: SETGT_UINT * T1.W, KC0[2].W, literal.x,
-; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT: CNDE_INT T0.Y, PS, PV.W, PV.Z,
-; EG-NEXT: LSHL * T0.W, 1, KC0[2].W,
-; EG-NEXT: CNDE_INT T0.X, T1.W, PV.W, 0.0,
+; EG-NEXT: ADD_INT T0.Z, KC0[2].W, literal.x,
+; EG-NEXT: SETGT_UINT T0.W, KC0[2].W, literal.y,
+; EG-NEXT: LSHL * T1.W, 1, KC0[2].W,
+; EG-NEXT: -32(nan), 31(4.344025e-44)
+; EG-NEXT: CNDE_INT T0.X, PV.W, PS, 0.0,
+; EG-NEXT: LSHL T1.W, 1, PV.Z,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT * T0.Y, T0.W, 0.0, PV.W,
%shl = shl i64 1, %a
store i64 %shl, i64 addrspace(1)* %out, align 8
ret void
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,CIVI %s
-; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI,CIVI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CIVI,VI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CIVI,CI %s
define amdgpu_kernel void @s_shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %lhs, <2 x i16> %rhs) #0 {
; GFX9-LABEL: s_shl_v2i16:
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_mov_b64 s[4:5], s[2:3]
; CI-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
-; CI-NEXT: s_mov_b32 s4, 0xffff
; CI-NEXT: s_mov_b64 s[2:3], s[6:7]
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_and_b32_e32 v3, s4, v2
+; CI-NEXT: v_and_b32_e32 v3, 0xffff, v2
; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; CI-NEXT: v_lshl_b32_e32 v2, 8, v2
; CI-NEXT: v_lshl_b32_e32 v3, 8, v3
; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; CI-NEXT: v_and_b32_e32 v3, s4, v3
+; CI-NEXT: v_and_b32_e32 v3, 0xfff8, v3
; CI-NEXT: v_or_b32_e32 v2, v3, v2
; CI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; CI-NEXT: s_endpgm
ret i1 %res
}
define i1 @scalar_i32_x_is_const2_eq(i32 %y) nounwind {
-; ARM6-LABEL: scalar_i32_x_is_const2_eq:
-; ARM6: @ %bb.0:
-; ARM6-NEXT: ldr r2, .LCPI19_0
-; ARM6-NEXT: mov r1, #1
-; ARM6-NEXT: and r0, r2, r1, lsr r0
-; ARM6-NEXT: clz r0, r0
-; ARM6-NEXT: lsr r0, r0, #5
-; ARM6-NEXT: bx lr
-; ARM6-NEXT: .p2align 2
-; ARM6-NEXT: @ %bb.1:
-; ARM6-NEXT: .LCPI19_0:
-; ARM6-NEXT: .long 2857740885 @ 0xaa55aa55
-;
-; ARM78-LABEL: scalar_i32_x_is_const2_eq:
-; ARM78: @ %bb.0:
-; ARM78-NEXT: movw r1, #43605
-; ARM78-NEXT: mov r2, #1
-; ARM78-NEXT: movt r1, #43605
-; ARM78-NEXT: and r0, r1, r2, lsr r0
-; ARM78-NEXT: clz r0, r0
-; ARM78-NEXT: lsr r0, r0, #5
-; ARM78-NEXT: bx lr
+; ARM-LABEL: scalar_i32_x_is_const2_eq:
+; ARM: @ %bb.0:
+; ARM-NEXT: mov r1, #1
+; ARM-NEXT: eor r0, r1, r1, lsr r0
+; ARM-NEXT: bx lr
;
; THUMB6-LABEL: scalar_i32_x_is_const2_eq:
; THUMB6: @ %bb.0:
; THUMB6-NEXT: movs r1, #1
; THUMB6-NEXT: lsrs r1, r0
-; THUMB6-NEXT: ldr r2, .LCPI19_0
-; THUMB6-NEXT: ands r2, r1
-; THUMB6-NEXT: rsbs r0, r2, #0
-; THUMB6-NEXT: adcs r0, r2
+; THUMB6-NEXT: rsbs r0, r1, #0
+; THUMB6-NEXT: adcs r0, r1
; THUMB6-NEXT: bx lr
-; THUMB6-NEXT: .p2align 2
-; THUMB6-NEXT: @ %bb.1:
-; THUMB6-NEXT: .LCPI19_0:
-; THUMB6-NEXT: .long 2857740885 @ 0xaa55aa55
;
; THUMB78-LABEL: scalar_i32_x_is_const2_eq:
; THUMB78: @ %bb.0:
; THUMB78-NEXT: movs r1, #1
; THUMB78-NEXT: lsr.w r0, r1, r0
-; THUMB78-NEXT: movw r1, #43605
-; THUMB78-NEXT: movt r1, #43605
-; THUMB78-NEXT: ands r0, r1
-; THUMB78-NEXT: clz r0, r0
-; THUMB78-NEXT: lsrs r0, r0, #5
+; THUMB78-NEXT: eor r0, r0, #1
; THUMB78-NEXT: bx lr
%t0 = lshr i32 1, %y
%t1 = and i32 %t0, 2857740885
; ARM6: @ %bb.0:
; ARM6-NEXT: uxtb r1, r1
; ARM6-NEXT: mov r2, #24
-; ARM6-NEXT: and r0, r0, r2, lsr r1
-; ARM6-NEXT: sxtb r1, r0
+; ARM6-NEXT: and r1, r0, r2, lsr r1
; ARM6-NEXT: mov r0, #0
; ARM6-NEXT: cmp r1, #0
; ARM6-NEXT: movmi r0, #1
; ARM78: @ %bb.0:
; ARM78-NEXT: uxtb r1, r1
; ARM78-NEXT: mov r2, #24
-; ARM78-NEXT: and r0, r0, r2, lsr r1
-; ARM78-NEXT: sxtb r1, r0
+; ARM78-NEXT: and r1, r0, r2, lsr r1
; ARM78-NEXT: mov r0, #0
; ARM78-NEXT: cmp r1, #0
; ARM78-NEXT: movwmi r0, #1
; THUMB6-NEXT: movs r2, #24
; THUMB6-NEXT: lsrs r2, r1
; THUMB6-NEXT: ands r2, r0
-; THUMB6-NEXT: sxtb r0, r2
-; THUMB6-NEXT: cmp r0, #0
; THUMB6-NEXT: bmi .LBB20_2
; THUMB6-NEXT: @ %bb.1:
; THUMB6-NEXT: movs r0, #0
; THUMB78-NEXT: movs r2, #24
; THUMB78-NEXT: lsr.w r1, r2, r1
; THUMB78-NEXT: ands r0, r1
-; THUMB78-NEXT: sxtb r1, r0
-; THUMB78-NEXT: movs r0, #0
-; THUMB78-NEXT: cmp r1, #0
+; THUMB78-NEXT: mov.w r0, #0
; THUMB78-NEXT: it mi
; THUMB78-NEXT: movmi r0, #1
; THUMB78-NEXT: bx lr
; ARM-NEXT: uxtb r1, r1
; ARM-NEXT: mov r2, #128
; ARM-NEXT: and r0, r0, r2, lsr r1
-; ARM-NEXT: mvn r1, #0
-; ARM-NEXT: uxtab r0, r1, r0
+; ARM-NEXT: sub r0, r0, #1
; ARM-NEXT: clz r0, r0
; ARM-NEXT: lsr r0, r0, #5
; ARM-NEXT: bx lr
; THUMB6-NEXT: movs r2, #128
; THUMB6-NEXT: lsrs r2, r1
; THUMB6-NEXT: ands r2, r0
-; THUMB6-NEXT: uxtb r0, r2
-; THUMB6-NEXT: subs r1, r0, #1
+; THUMB6-NEXT: subs r1, r2, #1
; THUMB6-NEXT: rsbs r0, r1, #0
; THUMB6-NEXT: adcs r0, r1
; THUMB6-NEXT: bx lr
; THUMB78-NEXT: movs r2, #128
; THUMB78-NEXT: lsr.w r1, r2, r1
; THUMB78-NEXT: ands r0, r1
-; THUMB78-NEXT: mov.w r1, #-1
-; THUMB78-NEXT: uxtab r0, r1, r0
+; THUMB78-NEXT: subs r0, #1
; THUMB78-NEXT: clz r0, r0
; THUMB78-NEXT: lsrs r0, r0, #5
; THUMB78-NEXT: bx lr
; ARM-NEXT: uxtb r0, r0
; ARM-NEXT: lsr r0, r0, r1
; ARM-NEXT: mov r1, #1
-; ARM-NEXT: uxtb r0, r0
; ARM-NEXT: eor r0, r1, r0, lsr #7
; ARM-NEXT: bx lr
;
; THUMB7-NEXT: uxtb r0, r0
; THUMB7-NEXT: lsrs r0, r1
; THUMB7-NEXT: movs r1, #1
-; THUMB7-NEXT: uxtb r0, r0
; THUMB7-NEXT: eor.w r0, r1, r0, lsr #7
; THUMB7-NEXT: bx lr
;
; THUMB8-NEXT: uxtb r1, r1
; THUMB8-NEXT: lsrs r0, r1
; THUMB8-NEXT: movs r1, #1
-; THUMB8-NEXT: uxtb r0, r0
; THUMB8-NEXT: eor.w r0, r1, r0, lsr #7
; THUMB8-NEXT: bx lr
%t0 = shl i8 128, %y
; ARM-NEXT: uxth r0, r0
; ARM-NEXT: lsr r0, r0, r1
; ARM-NEXT: mov r1, #1
-; ARM-NEXT: uxth r0, r0
; ARM-NEXT: eor r0, r1, r0, lsr #15
; ARM-NEXT: bx lr
;
; THUMB7-NEXT: uxth r0, r0
; THUMB7-NEXT: lsrs r0, r1
; THUMB7-NEXT: movs r1, #1
-; THUMB7-NEXT: uxth r0, r0
; THUMB7-NEXT: eor.w r0, r1, r0, lsr #15
; THUMB7-NEXT: bx lr
;
; THUMB8-NEXT: uxth r1, r1
; THUMB8-NEXT: lsrs r0, r1
; THUMB8-NEXT: movs r1, #1
-; THUMB8-NEXT: uxth r0, r0
; THUMB8-NEXT: eor.w r0, r1, r0, lsr #15
; THUMB8-NEXT: bx lr
%t0 = shl i16 32768, %y
; ARM-NEXT: uxtb r1, r1
; ARM-NEXT: uxtb r0, r0
; ARM-NEXT: lsr r0, r0, r1
-; ARM-NEXT: uxtb r0, r0
; ARM-NEXT: lsr r0, r0, #7
; ARM-NEXT: bx lr
;
; THUMB6-NEXT: uxtb r1, r1
; THUMB6-NEXT: uxtb r0, r0
; THUMB6-NEXT: lsrs r0, r1
-; THUMB6-NEXT: uxtb r0, r0
; THUMB6-NEXT: lsrs r0, r0, #7
; THUMB6-NEXT: bx lr
;
; THUMB7-NEXT: uxtb r1, r1
; THUMB7-NEXT: uxtb r0, r0
; THUMB7-NEXT: lsrs r0, r1
-; THUMB7-NEXT: uxtb r0, r0
; THUMB7-NEXT: lsrs r0, r0, #7
; THUMB7-NEXT: bx lr
;
; THUMB8-NEXT: uxtb r0, r0
; THUMB8-NEXT: uxtb r1, r1
; THUMB8-NEXT: lsrs r0, r1
-; THUMB8-NEXT: uxtb r0, r0
; THUMB8-NEXT: lsrs r0, r0, #7
; THUMB8-NEXT: bx lr
%t0 = shl i8 128, %y
define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
; ARM-LABEL: scalar_i8_signbit_eq_with_nonzero:
; ARM: @ %bb.0:
-; ARM-NEXT: uxtb r1, r1
-; ARM-NEXT: mvn r2, #127
-; ARM-NEXT: and r0, r0, r2, lsl r1
-; ARM-NEXT: mvn r1, #0
-; ARM-NEXT: uxtab r0, r1, r0
-; ARM-NEXT: clz r0, r0
-; ARM-NEXT: lsr r0, r0, #5
+; ARM-NEXT: mov r0, #0
; ARM-NEXT: bx lr
;
; THUMB6-LABEL: scalar_i8_signbit_eq_with_nonzero:
;
; THUMB78-LABEL: scalar_i8_signbit_eq_with_nonzero:
; THUMB78: @ %bb.0:
-; THUMB78-NEXT: uxtb r1, r1
-; THUMB78-NEXT: mvn r2, #127
-; THUMB78-NEXT: lsl.w r1, r2, r1
-; THUMB78-NEXT: ands r0, r1
-; THUMB78-NEXT: mov.w r1, #-1
-; THUMB78-NEXT: uxtab r0, r1, r0
-; THUMB78-NEXT: clz r0, r0
-; THUMB78-NEXT: lsrs r0, r0, #5
+; THUMB78-NEXT: movs r0, #0
; THUMB78-NEXT: bx lr
%t0 = shl i8 128, %y
%t1 = and i8 %t0, %x
entry:
; CHECK-LABEL: lshr32:
; CHECK: r0 >>= r2 # encoding: [0x7f,0x20,0x00,0x00,0x00,0x00,0x00,0x00]
-; CHECK: r0 <<= 32 # encoding: [0x67,0x00,0x00,0x00,0x20,0x00,0x00,0x00]
+; CHECK: exit # encoding: [0x95,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
%shr = lshr i32 %a, %cnt
ret i32 %shr
}
define zeroext i8 @lshr_i8(i8 zeroext %a, i8 zeroext %b) {
; MIPS2-LABEL: lshr_i8:
; MIPS2: # %bb.0: # %entry
-; MIPS2-NEXT: srlv $1, $4, $5
; MIPS2-NEXT: jr $ra
-; MIPS2-NEXT: andi $2, $1, 255
+; MIPS2-NEXT: srlv $2, $4, $5
;
; MIPS32-LABEL: lshr_i8:
; MIPS32: # %bb.0: # %entry
-; MIPS32-NEXT: srlv $1, $4, $5
; MIPS32-NEXT: jr $ra
-; MIPS32-NEXT: andi $2, $1, 255
+; MIPS32-NEXT: srlv $2, $4, $5
;
; MIPS32R2-LABEL: lshr_i8:
; MIPS32R2: # %bb.0: # %entry
-; MIPS32R2-NEXT: srlv $1, $4, $5
; MIPS32R2-NEXT: jr $ra
-; MIPS32R2-NEXT: andi $2, $1, 255
+; MIPS32R2-NEXT: srlv $2, $4, $5
;
; MIPS32R6-LABEL: lshr_i8:
; MIPS32R6: # %bb.0: # %entry
-; MIPS32R6-NEXT: srlv $1, $4, $5
; MIPS32R6-NEXT: jr $ra
-; MIPS32R6-NEXT: andi $2, $1, 255
+; MIPS32R6-NEXT: srlv $2, $4, $5
;
; MIPS3-LABEL: lshr_i8:
; MIPS3: # %bb.0: # %entry
-; MIPS3-NEXT: srlv $1, $4, $5
; MIPS3-NEXT: jr $ra
-; MIPS3-NEXT: andi $2, $1, 255
+; MIPS3-NEXT: srlv $2, $4, $5
;
; MIPS4-LABEL: lshr_i8:
; MIPS4: # %bb.0: # %entry
-; MIPS4-NEXT: srlv $1, $4, $5
; MIPS4-NEXT: jr $ra
-; MIPS4-NEXT: andi $2, $1, 255
+; MIPS4-NEXT: srlv $2, $4, $5
;
; MIPS64-LABEL: lshr_i8:
; MIPS64: # %bb.0: # %entry
-; MIPS64-NEXT: srlv $1, $4, $5
; MIPS64-NEXT: jr $ra
-; MIPS64-NEXT: andi $2, $1, 255
+; MIPS64-NEXT: srlv $2, $4, $5
;
; MIPS64R2-LABEL: lshr_i8:
; MIPS64R2: # %bb.0: # %entry
-; MIPS64R2-NEXT: srlv $1, $4, $5
; MIPS64R2-NEXT: jr $ra
-; MIPS64R2-NEXT: andi $2, $1, 255
+; MIPS64R2-NEXT: srlv $2, $4, $5
;
; MIPS64R6-LABEL: lshr_i8:
; MIPS64R6: # %bb.0: # %entry
-; MIPS64R6-NEXT: srlv $1, $4, $5
; MIPS64R6-NEXT: jr $ra
-; MIPS64R6-NEXT: andi $2, $1, 255
+; MIPS64R6-NEXT: srlv $2, $4, $5
;
; MMR3-LABEL: lshr_i8:
; MMR3: # %bb.0: # %entry
+; MMR3-NEXT: jr $ra
; MMR3-NEXT: srlv $2, $4, $5
-; MMR3-NEXT: andi16 $2, $2, 255
-; MMR3-NEXT: jrc $ra
;
; MMR6-LABEL: lshr_i8:
; MMR6: # %bb.0: # %entry
; MMR6-NEXT: srlv $2, $4, $5
-; MMR6-NEXT: andi16 $2, $2, 255
; MMR6-NEXT: jrc $ra
entry:
define zeroext i16 @lshr_i16(i16 zeroext %a, i16 zeroext %b) {
; MIPS2-LABEL: lshr_i16:
; MIPS2: # %bb.0: # %entry
-; MIPS2-NEXT: srlv $1, $4, $5
; MIPS2-NEXT: jr $ra
-; MIPS2-NEXT: andi $2, $1, 65535
+; MIPS2-NEXT: srlv $2, $4, $5
;
; MIPS32-LABEL: lshr_i16:
; MIPS32: # %bb.0: # %entry
-; MIPS32-NEXT: srlv $1, $4, $5
; MIPS32-NEXT: jr $ra
-; MIPS32-NEXT: andi $2, $1, 65535
+; MIPS32-NEXT: srlv $2, $4, $5
;
; MIPS32R2-LABEL: lshr_i16:
; MIPS32R2: # %bb.0: # %entry
-; MIPS32R2-NEXT: srlv $1, $4, $5
; MIPS32R2-NEXT: jr $ra
-; MIPS32R2-NEXT: andi $2, $1, 65535
+; MIPS32R2-NEXT: srlv $2, $4, $5
;
; MIPS32R6-LABEL: lshr_i16:
; MIPS32R6: # %bb.0: # %entry
-; MIPS32R6-NEXT: srlv $1, $4, $5
; MIPS32R6-NEXT: jr $ra
-; MIPS32R6-NEXT: andi $2, $1, 65535
+; MIPS32R6-NEXT: srlv $2, $4, $5
;
; MIPS3-LABEL: lshr_i16:
; MIPS3: # %bb.0: # %entry
-; MIPS3-NEXT: srlv $1, $4, $5
; MIPS3-NEXT: jr $ra
-; MIPS3-NEXT: andi $2, $1, 65535
+; MIPS3-NEXT: srlv $2, $4, $5
;
; MIPS4-LABEL: lshr_i16:
; MIPS4: # %bb.0: # %entry
-; MIPS4-NEXT: srlv $1, $4, $5
; MIPS4-NEXT: jr $ra
-; MIPS4-NEXT: andi $2, $1, 65535
+; MIPS4-NEXT: srlv $2, $4, $5
;
; MIPS64-LABEL: lshr_i16:
; MIPS64: # %bb.0: # %entry
-; MIPS64-NEXT: srlv $1, $4, $5
; MIPS64-NEXT: jr $ra
-; MIPS64-NEXT: andi $2, $1, 65535
+; MIPS64-NEXT: srlv $2, $4, $5
;
; MIPS64R2-LABEL: lshr_i16:
; MIPS64R2: # %bb.0: # %entry
-; MIPS64R2-NEXT: srlv $1, $4, $5
; MIPS64R2-NEXT: jr $ra
-; MIPS64R2-NEXT: andi $2, $1, 65535
+; MIPS64R2-NEXT: srlv $2, $4, $5
;
; MIPS64R6-LABEL: lshr_i16:
; MIPS64R6: # %bb.0: # %entry
-; MIPS64R6-NEXT: srlv $1, $4, $5
; MIPS64R6-NEXT: jr $ra
-; MIPS64R6-NEXT: andi $2, $1, 65535
+; MIPS64R6-NEXT: srlv $2, $4, $5
;
; MMR3-LABEL: lshr_i16:
; MMR3: # %bb.0: # %entry
+; MMR3-NEXT: jr $ra
; MMR3-NEXT: srlv $2, $4, $5
-; MMR3-NEXT: andi16 $2, $2, 65535
-; MMR3-NEXT: jrc $ra
;
; MMR6-LABEL: lshr_i16:
; MMR6: # %bb.0: # %entry
; MMR6-NEXT: srlv $2, $4, $5
-; MMR6-NEXT: andi16 $2, $2, 65535
; MMR6-NEXT: jrc $ra
entry:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; X32-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X32-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = lshr <8 x i16> %lhs, %rhs
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; X32-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X32-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%lshr = lshr <8 x i16> %r, %a
; X86-BMI1-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-BMI1-NEXT: movl $1, %eax
; X86-BMI1-NEXT: shrl %cl, %eax
-; X86-BMI1-NEXT: testl $-1437226411, %eax # imm = 0xAA55AA55
+; X86-BMI1-NEXT: testb %al, %al
; X86-BMI1-NEXT: sete %al
; X86-BMI1-NEXT: retl
;
; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-BMI2-NEXT: movl $1, %ecx
; X86-BMI2-NEXT: shrxl %eax, %ecx, %eax
-; X86-BMI2-NEXT: testl $-1437226411, %eax # imm = 0xAA55AA55
+; X86-BMI2-NEXT: testb %al, %al
; X86-BMI2-NEXT: sete %al
; X86-BMI2-NEXT: retl
;
; X64-BMI1-NEXT: movl $1, %eax
; X64-BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-BMI1-NEXT: shrl %cl, %eax
-; X64-BMI1-NEXT: testl $-1437226411, %eax # imm = 0xAA55AA55
+; X64-BMI1-NEXT: testb %al, %al
; X64-BMI1-NEXT: sete %al
; X64-BMI1-NEXT: retq
;
; X64-BMI2: # %bb.0:
; X64-BMI2-NEXT: movl $1, %eax
; X64-BMI2-NEXT: shrxl %edi, %eax, %eax
-; X64-BMI2-NEXT: testl $-1437226411, %eax # imm = 0xAA55AA55
+; X64-BMI2-NEXT: testb %al, %al
; X64-BMI2-NEXT: sete %al
; X64-BMI2-NEXT: retq
%t0 = lshr i32 1, %y
;------------------------------------------------------------------------------;
define i1 @negative_scalar_i8_bitsinmiddle_slt(i8 %x, i8 %y) nounwind {
-; X86-LABEL: negative_scalar_i8_bitsinmiddle_slt:
-; X86: # %bb.0:
-; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
-; X86-NEXT: movb $24, %al
-; X86-NEXT: shrb %cl, %al
-; X86-NEXT: andb {{[0-9]+}}(%esp), %al
-; X86-NEXT: shrb $7, %al
-; X86-NEXT: retl
-;
-; X64-LABEL: negative_scalar_i8_bitsinmiddle_slt:
-; X64: # %bb.0:
-; X64-NEXT: movl %esi, %ecx
-; X64-NEXT: movb $24, %al
-; X64-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-NEXT: shrb %cl, %al
-; X64-NEXT: andb %dil, %al
-; X64-NEXT: shrb $7, %al
-; X64-NEXT: retq
+; CHECK-LABEL: negative_scalar_i8_bitsinmiddle_slt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: ret{{[l|q]}}
%t0 = lshr i8 24, %y
%t1 = and i8 %t0, %x
%res = icmp slt i8 %t1, 0
;
; AVX2-LABEL: var_funnnel_v8i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; AVX2-NEXT: vpsubw %xmm2, %xmm3, %xmm3
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vpsrlvd %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX2-NEXT: vpsllvd %ymm4, %ymm3, %ymm3
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX2-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = [16,16,16,16,16,16,16,16]
-; AVX2-NEXT: vpsubw %xmm2, %xmm5, %xmm5
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpsrlvd %ymm5, %ymm1, %ymm1
-; AVX2-NEXT: vpshufb %ymm4, %ymm1, %ymm1
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vpor %xmm1, %xmm3, %xmm1
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqw %xmm3, %xmm2, %xmm2
;
; AVX2-LABEL: var_funnnel_v8i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpsllvd %ymm2, %ymm0, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
-; AVX2-NEXT: vpsubw %xmm1, %xmm4, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
+; AVX2-NEXT: vpsubw %xmm1, %xmm2, %xmm2
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: vpsrlvd %ymm2, %ymm0, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX2-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX2-NEXT: vpsrlvd %ymm4, %ymm3, %ymm3
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT: vpshufb %ymm4, %ymm3, %ymm3
-; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = [16,16,16,16,16,16,16,16]
-; AVX2-NEXT: vpsubw %xmm2, %xmm5, %xmm5
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4
+; AVX2-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX2-NEXT: vpsubw %xmm2, %xmm4, %xmm4
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vpsllvd %ymm5, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpsllvd %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpor %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpsubw %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
+; AVX2-NEXT: vpsubw %xmm1, %xmm2, %xmm2
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vpsllvd %ymm2, %ymm0, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
-; AVX2-NEXT: vpsubw %xmm1, %xmm4, %xmm1
+; AVX2-NEXT: vpsrlvd %ymm2, %ymm0, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX2-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
;
; AVX2-LABEL: var_rotate_v8i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpsllvd %ymm2, %ymm0, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
-; AVX2-NEXT: vpsubw %xmm1, %xmm4, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
+; AVX2-NEXT: vpsubw %xmm1, %xmm2, %xmm2
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: vpsrlvd %ymm2, %ymm0, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX2-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;