From: Craig Topper
Date: Thu, 21 Apr 2016 07:30:06 +0000 (+0000)
Subject: [AVX512] Add CTTZ support for v8i64 and v16i32 vectors.
X-Git-Tag: llvmorg-3.9.0-rc1~8492
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=21690db05aedd0f8126f03dadb4d604968568b99;p=platform%2Fupstream%2Fllvm.git

[AVX512] Add CTTZ support for v8i64 and v16i32 vectors.

llvm-svn: 266968
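
The lowering strategy is visible in the updated CHECK lines below: instead of scalarizing each element through tzcntq/tzcntl, CTTZ is expanded with the classic popcount identity cttz(x) = ctpop((x & -x) - 1), evaluated lane-wise (vpsubq/vpsubd from zero, vpandq/vpandd, subtract a broadcast 1, then a byte-wise population count). A minimal scalar C++ model of that expansion, for reference only; the function name is illustrative and __builtin_popcountll is a GCC/Clang extension, neither is part of this patch:

    #include <cassert>
    #include <cstdint>

    // cttz(x) == popcount((x & -x) - 1): isolating the lowest set bit and
    // subtracting 1 leaves one set bit per trailing zero. cttz(0) yields the
    // bit width because the mask becomes all-ones.
    static unsigned cttz_via_ctpop(uint64_t x) {
      uint64_t mask = (x & (0 - x)) - 1;
      return static_cast<unsigned>(__builtin_popcountll(mask));
    }

    int main() {
      assert(cttz_via_ctpop(0x10) == 4); // 0b10000 -> 4 trailing zeros
      assert(cttz_via_ctpop(1) == 0);
      assert(cttz_via_ctpop(0) == 64);   // mask is all-ones
    }
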
---

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index cff4936..c551cef 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1321,6 +1321,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
     setOperationAction(ISD::OR, VT, Legal);
     setOperationAction(ISD::XOR, VT, Legal);
     setOperationAction(ISD::CTPOP, VT, Custom);
+    setOperationAction(ISD::CTTZ, VT, Custom);
   }
 
   if (Subtarget.hasCDI()) {
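
Marking ISD::CTTZ as Custom for these 512-bit integer types routes the node into the target's vector lowering instead of the default scalarizing expansion. In SelectionDAG terms the expansion has roughly the shape below; this is a sketch under assumptions, not code from this commit (the helper name is invented; only the ISD opcodes and SelectionDAG calls are real):

    #include "llvm/CodeGen/SelectionDAG.h"
    using namespace llvm;

    // Sketch only: expand cttz(X) as ctpop((X & -X) - 1) on a vector type.
    static SDValue lowerVectorCTTZSketch(SDValue Op, SelectionDAG &DAG) {
      SDLoc DL(Op);
      EVT VT = Op.getValueType();
      SDValue X = Op.getOperand(0);
      SDValue Zero = DAG.getConstant(0, DL, VT);
      SDValue One = DAG.getConstant(1, DL, VT);
      // X & -X isolates the lowest set bit; subtracting 1 turns the
      // trailing zeros into a mask of ones that CTPOP can count.
      SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, Zero, X);
      SDValue LSB = DAG.getNode(ISD::AND, DL, VT, X, Neg);
      SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, LSB, One);
      return DAG.getNode(ISD::CTPOP, DL, VT, Mask);
    }
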
diff --git a/llvm/test/CodeGen/X86/vector-tzcnt-512.ll b/llvm/test/CodeGen/X86/vector-tzcnt-512.ll
index 8481b29..2d17159 100644
--- a/llvm/test/CodeGen/X86/vector-tzcnt-512.ll
+++ b/llvm/test/CodeGen/X86/vector-tzcnt-512.ll
@@ -4,43 +4,66 @@
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=-avx512cd,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BW
 
 define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
-; ALL-LABEL: testv8i64:
-; ALL: ## BB#0:
-; ALL-NEXT: vextracti32x4 $3, %zmm0, %xmm1
-; ALL-NEXT: vpextrq $1, %xmm1, %rax
-; ALL-NEXT: tzcntq %rax, %rax
-; ALL-NEXT: vmovq %rax, %xmm2
-; ALL-NEXT: vmovq %xmm1, %rax
-; ALL-NEXT: tzcntq %rax, %rax
-; ALL-NEXT: vmovq %rax, %xmm1
-; ALL-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; ALL-NEXT: vextracti32x4 $2, %zmm0, %xmm2
-; ALL-NEXT: vpextrq $1, %xmm2, %rax
-; ALL-NEXT: tzcntq %rax, %rax
-; ALL-NEXT: vmovq %rax, %xmm3
-; ALL-NEXT: vmovq %xmm2, %rax
-; ALL-NEXT: tzcntq %rax, %rax
-; ALL-NEXT: vmovq %rax, %xmm2
-; ALL-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; ALL-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; ALL-NEXT: vextracti32x4 $1, %zmm0, %xmm2
-; ALL-NEXT: vpextrq $1, %xmm2, %rax
-; ALL-NEXT: tzcntq %rax, %rax
-; ALL-NEXT: vmovq %rax, %xmm3
-; ALL-NEXT: vmovq %xmm2, %rax
-; ALL-NEXT: tzcntq %rax, %rax
-; ALL-NEXT: vmovq %rax, %xmm2
-; ALL-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; ALL-NEXT: vpextrq $1, %xmm0, %rax
-; ALL-NEXT: tzcntq %rax, %rax
-; ALL-NEXT: vmovq %rax, %xmm3
-; ALL-NEXT: vmovq %xmm0, %rax
-; ALL-NEXT: tzcntq %rax, %rax
-; ALL-NEXT: vmovq %rax, %xmm0
-; ALL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
-; ALL-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; ALL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; ALL-NEXT: retq
+; AVX512CD-LABEL: testv8i64:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; AVX512CD-NEXT: vpsubq %zmm0, %zmm1, %zmm1
+; AVX512CD-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512CD-NEXT: vpsubq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512CD-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512CD-NEXT: vpand %ymm2, %ymm1, %ymm3
+; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512CD-NEXT: vpshufb %ymm3, %ymm4, %ymm3
+; AVX512CD-NEXT: vpsrlw $4, %ymm1, %ymm1
+; AVX512CD-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX512CD-NEXT: vpshufb %ymm1, %ymm4, %ymm1
+; AVX512CD-NEXT: vpaddb %ymm3, %ymm1, %ymm1
+; AVX512CD-NEXT: vpxor %ymm3, %ymm3, %ymm3
+; AVX512CD-NEXT: vpsadbw %ymm3, %ymm1, %ymm1
+; AVX512CD-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX512CD-NEXT: vpshufb %ymm5, %ymm4, %ymm5
+; AVX512CD-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512CD-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512CD-NEXT: vpshufb %ymm0, %ymm4, %ymm0
+; AVX512CD-NEXT: vpaddb %ymm5, %ymm0, %ymm0
+; AVX512CD-NEXT: vpsadbw %ymm3, %ymm0, %ymm0
+; AVX512CD-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512CD-NEXT: retq
+;
+; AVX512CDBW-LABEL: testv8i64:
+; AVX512CDBW: ## BB#0:
+; AVX512CDBW-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT: vpsubq %zmm0, %zmm1, %zmm2
+; AVX512CDBW-NEXT: vpandq %zmm2, %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpsubq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512CDBW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512CDBW-NEXT: vpandq %zmm2, %zmm0, %zmm3
+; AVX512CDBW-NEXT: vmovdqu8 {{.*#+}} zmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512CDBW-NEXT: vpshufb %zmm3, %zmm4, %zmm3
+; AVX512CDBW-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpandq %zmm2, %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpshufb %zmm0, %zmm4, %zmm0
+; AVX512CDBW-NEXT: vpaddb %zmm3, %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpsadbw %zmm1, %zmm0, %zmm0
+; AVX512CDBW-NEXT: retq
+;
+; AVX512BW-LABEL: testv8i64:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpsubq %zmm0, %zmm1, %zmm2
+; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsubq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm3
+; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512BW-NEXT: vpshufb %zmm3, %zmm4, %zmm3
+; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpshufb %zmm0, %zmm4, %zmm0
+; AVX512BW-NEXT: vpaddb %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsadbw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
   %out = call <8 x i64> @llvm.cttz.v8i64(<8 x i64> %in, i1 0)
   ret <8 x i64> %out
 }
@@ -68,103 +91,101 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
 ;
 ; AVX512BW-LABEL: testv8i64u:
 ; AVX512BW: ## BB#0:
-; AVX512BW-NEXT: vextracti32x4 $3, %zmm0, %xmm1
-; AVX512BW-NEXT: vpextrq $1, %xmm1, %rax
-; AVX512BW-NEXT: tzcntq %rax, %rax
-; AVX512BW-NEXT: vmovq %rax, %xmm2
-; AVX512BW-NEXT: vmovq %xmm1, %rax
-; AVX512BW-NEXT: tzcntq %rax, %rax
-; AVX512BW-NEXT: vmovq %rax, %xmm1
-; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX512BW-NEXT: vextracti32x4 $2, %zmm0, %xmm2
-; AVX512BW-NEXT: vpextrq $1, %xmm2, %rax
-; AVX512BW-NEXT: tzcntq %rax, %rax
-; AVX512BW-NEXT: vmovq %rax, %xmm3
-; AVX512BW-NEXT: vmovq %xmm2, %rax
-; AVX512BW-NEXT: tzcntq %rax, %rax
-; AVX512BW-NEXT: vmovq %rax, %xmm2
-; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; AVX512BW-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX512BW-NEXT: vextracti32x4 $1, %zmm0, %xmm2
-; AVX512BW-NEXT: vpextrq $1, %xmm2, %rax
-; AVX512BW-NEXT: tzcntq %rax, %rax
-; AVX512BW-NEXT: vmovq %rax, %xmm3
-; AVX512BW-NEXT: vmovq %xmm2, %rax
-; AVX512BW-NEXT: tzcntq %rax, %rax
-; AVX512BW-NEXT: vmovq %rax, %xmm2
-; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; AVX512BW-NEXT: vpextrq $1, %xmm0, %rax
-; AVX512BW-NEXT: tzcntq %rax, %rax
-; AVX512BW-NEXT: vmovq %rax, %xmm3
-; AVX512BW-NEXT: vmovq %xmm0, %rax
-; AVX512BW-NEXT: tzcntq %rax, %rax
-; AVX512BW-NEXT: vmovq %rax, %xmm0
-; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
-; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpsubq %zmm0, %zmm1, %zmm2
+; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsubq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm3
+; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512BW-NEXT: vpshufb %zmm3, %zmm4, %zmm3
+; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpshufb %zmm0, %zmm4, %zmm0
+; AVX512BW-NEXT: vpaddb %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsadbw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT: retq
   %out = call <8 x i64> @llvm.cttz.v8i64(<8 x i64> %in, i1 -1)
   ret <8 x i64> %out
 }
 
 define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
-; ALL-LABEL: testv16i32:
-; ALL: ## BB#0:
-; ALL-NEXT: vextracti32x4 $3, %zmm0, %xmm1
-; ALL-NEXT: vpextrd $1, %xmm1, %eax
-; ALL-NEXT: tzcntl %eax, %eax
-; ALL-NEXT: vmovd %xmm1, %ecx
-; ALL-NEXT: tzcntl %ecx, %ecx
-; ALL-NEXT: vmovd %ecx, %xmm2
-; ALL-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; ALL-NEXT: vpextrd $2, %xmm1, %eax
-; ALL-NEXT: tzcntl %eax, %eax
-; ALL-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; ALL-NEXT: vpextrd $3, %xmm1, %eax
-; ALL-NEXT: tzcntl %eax, %eax
-; ALL-NEXT: vpinsrd $3, %eax, %xmm2, %xmm1
-; ALL-NEXT: vextracti32x4 $2, %zmm0, %xmm2
-; ALL-NEXT: vpextrd $1, %xmm2, %eax
-; ALL-NEXT: tzcntl %eax, %eax
-; ALL-NEXT: vmovd %xmm2, %ecx
-; ALL-NEXT: tzcntl %ecx, %ecx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrd $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrd $2, %xmm2, %eax
-; ALL-NEXT: tzcntl %eax, %eax
-; ALL-NEXT: vpinsrd $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrd $3, %xmm2, %eax
-; ALL-NEXT: tzcntl %eax, %eax
-; ALL-NEXT: vpinsrd $3, %eax, %xmm3, %xmm2
-; ALL-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; ALL-NEXT: vextracti32x4 $1, %zmm0, %xmm2
-; ALL-NEXT: vpextrd $1, %xmm2, %eax
-; ALL-NEXT: tzcntl %eax, %eax
-; ALL-NEXT: vmovd %xmm2, %ecx
-; ALL-NEXT: tzcntl %ecx, %ecx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrd $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrd $2, %xmm2, %eax
-; ALL-NEXT: tzcntl %eax, %eax
-; ALL-NEXT: vpinsrd $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrd $3, %xmm2, %eax
-; ALL-NEXT: tzcntl %eax, %eax
-; ALL-NEXT: vpinsrd $3, %eax, %xmm3, %xmm2
-; ALL-NEXT: vpextrd $1, %xmm0, %eax
-; ALL-NEXT: tzcntl %eax, %eax
-; ALL-NEXT: vmovd %xmm0, %ecx
-; ALL-NEXT: tzcntl %ecx, %ecx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrd $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrd $2, %xmm0, %eax
-; ALL-NEXT: tzcntl %eax, %eax
-; ALL-NEXT: vpinsrd $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrd $3, %xmm0, %eax
-; ALL-NEXT: tzcntl %eax, %eax
-; ALL-NEXT: vpinsrd $3, %eax, %xmm3, %xmm0
-; ALL-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; ALL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; ALL-NEXT: retq
+; AVX512CD-LABEL: testv16i32:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; AVX512CD-NEXT: vpsubd %zmm0, %zmm1, %zmm1
+; AVX512CD-NEXT: vpandd %zmm1, %zmm0, %zmm0
+; AVX512CD-NEXT: vpsubd {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512CD-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512CD-NEXT: vpand %ymm2, %ymm1, %ymm3
+; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512CD-NEXT: vpshufb %ymm3, %ymm4, %ymm3
+; AVX512CD-NEXT: vpsrlw $4, %ymm1, %ymm1
+; AVX512CD-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX512CD-NEXT: vpshufb %ymm1, %ymm4, %ymm1
+; AVX512CD-NEXT: vpaddb %ymm3, %ymm1, %ymm1
+; AVX512CD-NEXT: vpxor %ymm3, %ymm3, %ymm3
+; AVX512CD-NEXT: vpunpckhdq {{.*#+}} ymm5 = ymm1[2],ymm3[2],ymm1[3],ymm3[3],ymm1[6],ymm3[6],ymm1[7],ymm3[7]
+; AVX512CD-NEXT: vpsadbw %ymm3, %ymm5, %ymm5
+; AVX512CD-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[1],ymm3[1],ymm1[4],ymm3[4],ymm1[5],ymm3[5]
+; AVX512CD-NEXT: vpsadbw %ymm3, %ymm1, %ymm1
+; AVX512CD-NEXT: vpackuswb %ymm5, %ymm1, %ymm1
+; AVX512CD-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX512CD-NEXT: vpshufb %ymm5, %ymm4, %ymm5
+; AVX512CD-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512CD-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512CD-NEXT: vpshufb %ymm0, %ymm4, %ymm0
+; AVX512CD-NEXT: vpaddb %ymm5, %ymm0, %ymm0
+; AVX512CD-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm0[2],ymm3[2],ymm0[3],ymm3[3],ymm0[6],ymm3[6],ymm0[7],ymm3[7]
+; AVX512CD-NEXT: vpsadbw %ymm3, %ymm2, %ymm2
+; AVX512CD-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[1],ymm3[1],ymm0[4],ymm3[4],ymm0[5],ymm3[5]
+; AVX512CD-NEXT: vpsadbw %ymm3, %ymm0, %ymm0
+; AVX512CD-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; AVX512CD-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512CD-NEXT: retq
+;
+; AVX512CDBW-LABEL: testv16i32:
+; AVX512CDBW: ## BB#0:
+; AVX512CDBW-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT: vpsubd %zmm0, %zmm1, %zmm2
+; AVX512CDBW-NEXT: vpandd %zmm2, %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpsubd {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512CDBW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512CDBW-NEXT: vpandq %zmm2, %zmm0, %zmm3
+; AVX512CDBW-NEXT: vmovdqu8 {{.*#+}} zmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512CDBW-NEXT: vpshufb %zmm3, %zmm4, %zmm3
+; AVX512CDBW-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpandq %zmm2, %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpshufb %zmm0, %zmm4, %zmm0
+; AVX512CDBW-NEXT: vpaddb %zmm3, %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpunpckhdq {{.*#+}} zmm2 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
+; AVX512CDBW-NEXT: vpsadbw %zmm1, %zmm2, %zmm2
+; AVX512CDBW-NEXT: vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
+; AVX512CDBW-NEXT: vpsadbw %zmm1, %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
+; AVX512CDBW-NEXT: retq
+;
+; AVX512BW-LABEL: testv16i32:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpsubd %zmm0, %zmm1, %zmm2
+; AVX512BW-NEXT: vpandd %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsubd {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm3
+; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512BW-NEXT: vpshufb %zmm3, %zmm4, %zmm3
+; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpshufb %zmm0, %zmm4, %zmm0
+; AVX512BW-NEXT: vpaddb %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT: vpunpckhdq {{.*#+}} zmm2 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
+; AVX512BW-NEXT: vpsadbw %zmm1, %zmm2, %zmm2
+; AVX512BW-NEXT: vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
+; AVX512BW-NEXT: vpsadbw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
   %out = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %in, i1 0)
   ret <16 x i32> %out
 }
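
The vector popcount in the CHECK lines above is the standard PSHUFB nibble-LUT sequence: mask each byte's low nibble, look its bit count up in the 16-entry table (the 0,1,1,2,... constant), repeat for the high nibble after a 4-bit shift, and add the two. For the v8i64 tests a single VPSADBW against zero then sums the eight byte counts of each 64-bit lane; for v16i32 the byte counts are first interleaved with zero (vpunpckldq/vpunpckhdq) so that each VPSADBW sum covers exactly one dword, and vpackuswb packs the results back together. A scalar C++ model of the per-dword count, for reference only (the names are illustrative, not from the patch):

    #include <cassert>
    #include <cstdint>

    // The 16-entry vpshufb table from the CHECK lines: popcount per nibble.
    static const uint8_t kNibbleLUT[16] = {0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4};

    // Per-byte counts via the LUT, then a horizontal add over the 4 bytes
    // of the dword, which is the role vpsadbw plays after the unpack with
    // zero in the vector code.
    static uint32_t popcount32_lut(uint32_t v) {
      uint32_t sum = 0;
      for (int i = 0; i < 4; ++i) {
        uint8_t b = (v >> (8 * i)) & 0xFF;
        sum += kNibbleLUT[b & 0xF] + kNibbleLUT[b >> 4];
      }
      return sum;
    }

    int main() {
      assert(popcount32_lut(0x00000000u) == 0);
      assert(popcount32_lut(0x0000000Fu) == 4);
      assert(popcount32_lut(0xFFFFFFFFu) == 32);
    }
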
@@ -192,60 +213,23 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
 ;
 ; AVX512BW-LABEL: testv16i32u:
 ; AVX512BW: ## BB#0:
-; AVX512BW-NEXT: vextracti32x4 $3, %zmm0, %xmm1
-; AVX512BW-NEXT: vpextrd $1, %xmm1, %eax
-; AVX512BW-NEXT: tzcntl %eax, %eax
-; AVX512BW-NEXT: vmovd %xmm1, %ecx
-; AVX512BW-NEXT: tzcntl %ecx, %ecx
-; AVX512BW-NEXT: vmovd %ecx, %xmm2
-; AVX512BW-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX512BW-NEXT: vpextrd $2, %xmm1, %eax
-; AVX512BW-NEXT: tzcntl %eax, %eax
-; AVX512BW-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX512BW-NEXT: vpextrd $3, %xmm1, %eax
-; AVX512BW-NEXT: tzcntl %eax, %eax
-; AVX512BW-NEXT: vpinsrd $3, %eax, %xmm2, %xmm1
-; AVX512BW-NEXT: vextracti32x4 $2, %zmm0, %xmm2
-; AVX512BW-NEXT: vpextrd $1, %xmm2, %eax
-; AVX512BW-NEXT: tzcntl %eax, %eax
-; AVX512BW-NEXT: vmovd %xmm2, %ecx
-; AVX512BW-NEXT: tzcntl %ecx, %ecx
-; AVX512BW-NEXT: vmovd %ecx, %xmm3
-; AVX512BW-NEXT: vpinsrd $1, %eax, %xmm3, %xmm3
-; AVX512BW-NEXT: vpextrd $2, %xmm2, %eax
-; AVX512BW-NEXT: tzcntl %eax, %eax
-; AVX512BW-NEXT: vpinsrd $2, %eax, %xmm3, %xmm3
-; AVX512BW-NEXT: vpextrd $3, %xmm2, %eax
-; AVX512BW-NEXT: tzcntl %eax, %eax
-; AVX512BW-NEXT: vpinsrd $3, %eax, %xmm3, %xmm2
-; AVX512BW-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX512BW-NEXT: vextracti32x4 $1, %zmm0, %xmm2
-; AVX512BW-NEXT: vpextrd $1, %xmm2, %eax
-; AVX512BW-NEXT: tzcntl %eax, %eax
-; AVX512BW-NEXT: vmovd %xmm2, %ecx
-; AVX512BW-NEXT: tzcntl %ecx, %ecx
-; AVX512BW-NEXT: vmovd %ecx, %xmm3
-; AVX512BW-NEXT: vpinsrd $1, %eax, %xmm3, %xmm3
-; AVX512BW-NEXT: vpextrd $2, %xmm2, %eax
-; AVX512BW-NEXT: tzcntl %eax, %eax
-; AVX512BW-NEXT: vpinsrd $2, %eax, %xmm3, %xmm3
-; AVX512BW-NEXT: vpextrd $3, %xmm2, %eax
-; AVX512BW-NEXT: tzcntl %eax, %eax
-; AVX512BW-NEXT: vpinsrd $3, %eax, %xmm3, %xmm2
-; AVX512BW-NEXT: vpextrd $1, %xmm0, %eax
-; AVX512BW-NEXT: tzcntl %eax, %eax
-; AVX512BW-NEXT: vmovd %xmm0, %ecx
-; AVX512BW-NEXT: tzcntl %ecx, %ecx
-; AVX512BW-NEXT: vmovd %ecx, %xmm3
-; AVX512BW-NEXT: vpinsrd $1, %eax, %xmm3, %xmm3
-; AVX512BW-NEXT: vpextrd $2, %xmm0, %eax
-; AVX512BW-NEXT: tzcntl %eax, %eax
-; AVX512BW-NEXT: vpinsrd $2, %eax, %xmm3, %xmm3
-; AVX512BW-NEXT: vpextrd $3, %xmm0, %eax
-; AVX512BW-NEXT: tzcntl %eax, %eax
-; AVX512BW-NEXT: vpinsrd $3, %eax, %xmm3, %xmm0
-; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpsubd %zmm0, %zmm1, %zmm2
+; AVX512BW-NEXT: vpandd %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsubd {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm3
+; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512BW-NEXT: vpshufb %zmm3, %zmm4, %zmm3
+; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpshufb %zmm0, %zmm4, %zmm0
+; AVX512BW-NEXT: vpaddb %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT: vpunpckhdq {{.*#+}} zmm2 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
+; AVX512BW-NEXT: vpsadbw %zmm1, %zmm2, %zmm2
+; AVX512BW-NEXT: vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
+; AVX512BW-NEXT: vpsadbw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
 ; AVX512BW-NEXT: retq
   %out = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %in, i1 -1)
   ret <16 x i32> %out
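
Note that in the zero-undef variants above (the i1 -1 calls), only the AVX512BW-prefixed checks change; the preceding check blocks are untouched context in both hunks. Putting the two halves of the expansion together, one 32-bit lane of the new code behaves like the following reference model (again a sketch, not code from the patch):

    #include <cassert>
    #include <cstdint>

    static const uint8_t kNibbleLUT[16] = {0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4};

    // End-to-end model of one 32-bit lane: build the trailing-zero mask,
    // then popcount it with the nibble LUT.
    static uint32_t cttz32_model(uint32_t x) {
      uint32_t mask = (x & (0 - x)) - 1;      // vpsubd / vpandd / vpsubd {1to16}
      uint32_t count = 0;
      for (int i = 0; i < 4; ++i) {
        uint8_t b = (mask >> (8 * i)) & 0xFF; // vpand / vpsrlw + vpand
        count += kNibbleLUT[b & 0xF] + kNibbleLUT[b >> 4]; // vpshufb + vpaddb
      }
      return count; // vpunpck*dq / vpsadbw / vpackuswb do this horizontal add
    }

    int main() {
      for (uint32_t i = 0; i < 32; ++i)
        assert(cttz32_model(1u << i) == i);
      assert(cttz32_model(0) == 32); // all-ones mask covers the full width
    }
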