organize them by the width of the vector.
This makes it a lot easier to see that we're covering all of the vector
types but not doing so excessively. This also adds tests across the
spectrum of SSE versions in addition to the AVX versions.
If you're really tired of seeing the *massive* sprawl of scalarized code
for this, don't worry, I'm just about to land Bruno's patch that
dramatically improves the situation for SSSE3 and newer.
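
(For reference, the scalarized lowering checked in the deleted file below expands
each lane into the classic parallel bit-count. The sketch here is purely
illustrative and not part of this patch; the helper name popcount32 is made up,
but the masks and the multiply match the constants in the AVX-NOPOPCNT check
lines.)

  #include <stdint.h>

  /* Classic parallel bit-count for one 32-bit lane. The 0x55555555,
     0x33333333 and 0x0F0F0F0F masks fold bit counts into 2-, 4- and
     8-bit sums; the 0x01010101 multiply adds the byte sums into the
     top byte, which the final shift extracts. */
  static uint32_t popcount32(uint32_t v) {
    v = v - ((v >> 1) & 0x55555555u);                 /* 2-bit sums        */
    v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u); /* 4-bit sums        */
    v = (v + (v >> 4)) & 0x0F0F0F0Fu;                 /* 8-bit sums        */
    return (v * 0x01010101u) >> 24;                   /* total in top byte */
  }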
llvm-svn: 238520
+++ /dev/null
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -mattr=+popcnt | FileCheck --check-prefix=AVX --check-prefix=AVX-POPCNT %s
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -mattr=-popcnt | FileCheck --check-prefix=AVX --check-prefix=AVX-NOPOPCNT %s
-
-define <4 x i32> @testv4i32(<4 x i32> %in) {
-; AVX-POPCNT-LABEL: testv4i32:
-; AVX-POPCNT: # BB#0:
-; AVX-POPCNT-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntl %eax, %eax
-; AVX-POPCNT-NEXT: vmovd %xmm0, %ecx
-; AVX-POPCNT-NEXT: popcntl %ecx, %ecx
-; AVX-POPCNT-NEXT: vmovd %ecx, %xmm1
-; AVX-POPCNT-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
-; AVX-POPCNT-NEXT: vpextrd $2, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntl %eax, %eax
-; AVX-POPCNT-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX-POPCNT-NEXT: vpextrd $3, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntl %eax, %eax
-; AVX-POPCNT-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
-; AVX-POPCNT-NEXT: retq
-;
-; AVX-NOPOPCNT-LABEL: testv4i32:
-; AVX-NOPOPCNT: # BB#0:
-; AVX-NOPOPCNT-NEXT: vpsrld $1, %xmm0, %xmm1
-; AVX-NOPOPCNT-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX-NOPOPCNT-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; AVX-NOPOPCNT-NEXT: vmovdqa {{.*#+}} xmm1 = [858993459,858993459,858993459,858993459]
-; AVX-NOPOPCNT-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX-NOPOPCNT-NEXT: vpsrld $2, %xmm0, %xmm0
-; AVX-NOPOPCNT-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NOPOPCNT-NEXT: vpaddd %xmm0, %xmm2, %xmm0
-; AVX-NOPOPCNT-NEXT: vpsrld $4, %xmm0, %xmm1
-; AVX-NOPOPCNT-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX-NOPOPCNT-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NOPOPCNT-NEXT: vpsrld $8, %xmm0, %xmm1
-; AVX-NOPOPCNT-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX-NOPOPCNT-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NOPOPCNT-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX-NOPOPCNT-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NOPOPCNT-NEXT: retq
- %out = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %in)
- ret <4 x i32> %out
-}
-
-define <32 x i8> @testv32i8(<32 x i8> %in) {
-; AVX-POPCNT-LABEL: testv32i8:
-; AVX-POPCNT: # BB#0:
-; AVX-POPCNT-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-POPCNT-NEXT: vpextrb $1, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpextrb $0, %xmm1, %ecx
-; AVX-POPCNT-NEXT: popcntw %cx, %cx
-; AVX-POPCNT-NEXT: vmovd %ecx, %xmm2
-; AVX-POPCNT-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $2, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $3, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $4, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $5, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $6, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $7, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $8, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $9, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $10, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $11, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $12, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $13, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $14, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $15, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
-; AVX-POPCNT-NEXT: vpextrb $1, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX-POPCNT-NEXT: popcntw %cx, %cx
-; AVX-POPCNT-NEXT: vmovd %ecx, %xmm2
-; AVX-POPCNT-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $2, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $3, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $4, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $5, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $6, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $7, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $8, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $9, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $10, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $11, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $12, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $13, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $14, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrb $15, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
-; AVX-POPCNT-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-POPCNT-NEXT: retq
-;
-; AVX-NOPOPCNT-LABEL: testv32i8:
-; AVX-NOPOPCNT: # BB#0:
-; AVX-NOPOPCNT-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrb $1, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpextrb $0, %xmm1, %ecx
-; AVX-NOPOPCNT-NEXT: movb %cl, %dl
-; AVX-NOPOPCNT-NEXT: shrb %dl
-; AVX-NOPOPCNT-NEXT: andb $85, %dl
-; AVX-NOPOPCNT-NEXT: subb %dl, %cl
-; AVX-NOPOPCNT-NEXT: movb %cl, %dl
-; AVX-NOPOPCNT-NEXT: andb $51, %dl
-; AVX-NOPOPCNT-NEXT: shrb $2, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: addb %dl, %cl
-; AVX-NOPOPCNT-NEXT: movb %cl, %dl
-; AVX-NOPOPCNT-NEXT: shrb $4, %dl
-; AVX-NOPOPCNT-NEXT: addb %cl, %dl
-; AVX-NOPOPCNT-NEXT: andb $15, %dl
-; AVX-NOPOPCNT-NEXT: movzbl %dl, %ecx
-; AVX-NOPOPCNT-NEXT: vmovd %ecx, %xmm2
-; AVX-NOPOPCNT-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $2, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $3, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $4, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $5, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $6, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $7, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $8, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $9, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $10, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $11, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $12, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $13, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $14, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $15, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrb $1, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX-NOPOPCNT-NEXT: movb %cl, %dl
-; AVX-NOPOPCNT-NEXT: shrb %dl
-; AVX-NOPOPCNT-NEXT: andb $85, %dl
-; AVX-NOPOPCNT-NEXT: subb %dl, %cl
-; AVX-NOPOPCNT-NEXT: movb %cl, %dl
-; AVX-NOPOPCNT-NEXT: andb $51, %dl
-; AVX-NOPOPCNT-NEXT: shrb $2, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: addb %dl, %cl
-; AVX-NOPOPCNT-NEXT: movb %cl, %dl
-; AVX-NOPOPCNT-NEXT: shrb $4, %dl
-; AVX-NOPOPCNT-NEXT: addb %cl, %dl
-; AVX-NOPOPCNT-NEXT: andb $15, %dl
-; AVX-NOPOPCNT-NEXT: movzbl %dl, %ecx
-; AVX-NOPOPCNT-NEXT: vmovd %ecx, %xmm2
-; AVX-NOPOPCNT-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $2, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $3, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $4, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $5, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $6, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $7, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $8, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $9, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $10, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $11, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $12, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $13, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $14, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrb $15, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
-; AVX-NOPOPCNT-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NOPOPCNT-NEXT: retq
- %out = call <32 x i8> @llvm.ctpop.v32i8(<32 x i8> %in)
- ret <32 x i8> %out
-}
-
-define <4 x i64> @testv4i64(<4 x i64> %in) {
-; AVX-POPCNT-LABEL: testv4i64:
-; AVX-POPCNT: # BB#0:
-; AVX-POPCNT-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-POPCNT-NEXT: vpextrq $1, %xmm1, %rax
-; AVX-POPCNT-NEXT: popcntq %rax, %rax
-; AVX-POPCNT-NEXT: vmovq %rax, %xmm2
-; AVX-POPCNT-NEXT: vmovq %xmm1, %rax
-; AVX-POPCNT-NEXT: popcntq %rax, %rax
-; AVX-POPCNT-NEXT: vmovq %rax, %xmm1
-; AVX-POPCNT-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX-POPCNT-NEXT: vpextrq $1, %xmm0, %rax
-; AVX-POPCNT-NEXT: popcntq %rax, %rax
-; AVX-POPCNT-NEXT: vmovq %rax, %xmm2
-; AVX-POPCNT-NEXT: vmovq %xmm0, %rax
-; AVX-POPCNT-NEXT: popcntq %rax, %rax
-; AVX-POPCNT-NEXT: vmovq %rax, %xmm0
-; AVX-POPCNT-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX-POPCNT-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-POPCNT-NEXT: retq
-;
-; AVX-NOPOPCNT-LABEL: testv4i64:
-; AVX-NOPOPCNT: # BB#0:
-; AVX-NOPOPCNT-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrq $1, %xmm1, %rdx
-; AVX-NOPOPCNT-NEXT: movq %rdx, %rax
-; AVX-NOPOPCNT-NEXT: shrq %rax
-; AVX-NOPOPCNT-NEXT: movabsq $6148914691236517205, %r8 # imm = 0x5555555555555555
-; AVX-NOPOPCNT-NEXT: andq %r8, %rax
-; AVX-NOPOPCNT-NEXT: subq %rax, %rdx
-; AVX-NOPOPCNT-NEXT: movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
-; AVX-NOPOPCNT-NEXT: movq %rdx, %rsi
-; AVX-NOPOPCNT-NEXT: andq %rax, %rsi
-; AVX-NOPOPCNT-NEXT: shrq $2, %rdx
-; AVX-NOPOPCNT-NEXT: andq %rax, %rdx
-; AVX-NOPOPCNT-NEXT: addq %rsi, %rdx
-; AVX-NOPOPCNT-NEXT: movq %rdx, %rdi
-; AVX-NOPOPCNT-NEXT: shrq $4, %rdi
-; AVX-NOPOPCNT-NEXT: addq %rdx, %rdi
-; AVX-NOPOPCNT-NEXT: movabsq $1085102592571150095, %rdx # imm = 0xF0F0F0F0F0F0F0F
-; AVX-NOPOPCNT-NEXT: andq %rdx, %rdi
-; AVX-NOPOPCNT-NEXT: movabsq $72340172838076673, %rsi # imm = 0x101010101010101
-; AVX-NOPOPCNT-NEXT: imulq %rsi, %rdi
-; AVX-NOPOPCNT-NEXT: shrq $56, %rdi
-; AVX-NOPOPCNT-NEXT: vmovq %rdi, %xmm2
-; AVX-NOPOPCNT-NEXT: vmovq %xmm1, %rcx
-; AVX-NOPOPCNT-NEXT: movq %rcx, %rdi
-; AVX-NOPOPCNT-NEXT: shrq %rdi
-; AVX-NOPOPCNT-NEXT: andq %r8, %rdi
-; AVX-NOPOPCNT-NEXT: subq %rdi, %rcx
-; AVX-NOPOPCNT-NEXT: movq %rcx, %rdi
-; AVX-NOPOPCNT-NEXT: andq %rax, %rdi
-; AVX-NOPOPCNT-NEXT: shrq $2, %rcx
-; AVX-NOPOPCNT-NEXT: andq %rax, %rcx
-; AVX-NOPOPCNT-NEXT: addq %rdi, %rcx
-; AVX-NOPOPCNT-NEXT: movq %rcx, %rdi
-; AVX-NOPOPCNT-NEXT: shrq $4, %rdi
-; AVX-NOPOPCNT-NEXT: addq %rcx, %rdi
-; AVX-NOPOPCNT-NEXT: andq %rdx, %rdi
-; AVX-NOPOPCNT-NEXT: imulq %rsi, %rdi
-; AVX-NOPOPCNT-NEXT: shrq $56, %rdi
-; AVX-NOPOPCNT-NEXT: vmovq %rdi, %xmm1
-; AVX-NOPOPCNT-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX-NOPOPCNT-NEXT: vpextrq $1, %xmm0, %rcx
-; AVX-NOPOPCNT-NEXT: movq %rcx, %rdi
-; AVX-NOPOPCNT-NEXT: shrq %rdi
-; AVX-NOPOPCNT-NEXT: andq %r8, %rdi
-; AVX-NOPOPCNT-NEXT: subq %rdi, %rcx
-; AVX-NOPOPCNT-NEXT: movq %rcx, %rdi
-; AVX-NOPOPCNT-NEXT: andq %rax, %rdi
-; AVX-NOPOPCNT-NEXT: shrq $2, %rcx
-; AVX-NOPOPCNT-NEXT: andq %rax, %rcx
-; AVX-NOPOPCNT-NEXT: addq %rdi, %rcx
-; AVX-NOPOPCNT-NEXT: movq %rcx, %rdi
-; AVX-NOPOPCNT-NEXT: shrq $4, %rdi
-; AVX-NOPOPCNT-NEXT: addq %rcx, %rdi
-; AVX-NOPOPCNT-NEXT: andq %rdx, %rdi
-; AVX-NOPOPCNT-NEXT: imulq %rsi, %rdi
-; AVX-NOPOPCNT-NEXT: shrq $56, %rdi
-; AVX-NOPOPCNT-NEXT: vmovq %rdi, %xmm2
-; AVX-NOPOPCNT-NEXT: vmovq %xmm0, %rcx
-; AVX-NOPOPCNT-NEXT: movq %rcx, %rdi
-; AVX-NOPOPCNT-NEXT: shrq %rdi
-; AVX-NOPOPCNT-NEXT: andq %r8, %rdi
-; AVX-NOPOPCNT-NEXT: subq %rdi, %rcx
-; AVX-NOPOPCNT-NEXT: movq %rcx, %rdi
-; AVX-NOPOPCNT-NEXT: andq %rax, %rdi
-; AVX-NOPOPCNT-NEXT: shrq $2, %rcx
-; AVX-NOPOPCNT-NEXT: andq %rax, %rcx
-; AVX-NOPOPCNT-NEXT: addq %rdi, %rcx
-; AVX-NOPOPCNT-NEXT: movq %rcx, %rax
-; AVX-NOPOPCNT-NEXT: shrq $4, %rax
-; AVX-NOPOPCNT-NEXT: addq %rcx, %rax
-; AVX-NOPOPCNT-NEXT: andq %rdx, %rax
-; AVX-NOPOPCNT-NEXT: imulq %rsi, %rax
-; AVX-NOPOPCNT-NEXT: shrq $56, %rax
-; AVX-NOPOPCNT-NEXT: vmovq %rax, %xmm0
-; AVX-NOPOPCNT-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX-NOPOPCNT-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NOPOPCNT-NEXT: retq
- %out = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %in)
- ret <4 x i64> %out
-}
-
-define <8 x i32> @testv8i32(<8 x i32> %in) {
-; AVX-POPCNT-LABEL: testv8i32:
-; AVX-POPCNT: # BB#0:
-; AVX-POPCNT-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-POPCNT-NEXT: vpextrd $1, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntl %eax, %eax
-; AVX-POPCNT-NEXT: vmovd %xmm1, %ecx
-; AVX-POPCNT-NEXT: popcntl %ecx, %ecx
-; AVX-POPCNT-NEXT: vmovd %ecx, %xmm2
-; AVX-POPCNT-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrd $2, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntl %eax, %eax
-; AVX-POPCNT-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrd $3, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntl %eax, %eax
-; AVX-POPCNT-NEXT: vpinsrd $3, %eax, %xmm2, %xmm1
-; AVX-POPCNT-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntl %eax, %eax
-; AVX-POPCNT-NEXT: vmovd %xmm0, %ecx
-; AVX-POPCNT-NEXT: popcntl %ecx, %ecx
-; AVX-POPCNT-NEXT: vmovd %ecx, %xmm2
-; AVX-POPCNT-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrd $2, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntl %eax, %eax
-; AVX-POPCNT-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrd $3, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntl %eax, %eax
-; AVX-POPCNT-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
-; AVX-POPCNT-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-POPCNT-NEXT: retq
-;
-; AVX-NOPOPCNT-LABEL: testv8i32:
-; AVX-NOPOPCNT: # BB#0:
-; AVX-NOPOPCNT-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrd $1, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $1431655765, %ecx # imm = 0x55555555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $858993459, %ecx # imm = 0x33333333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $858993459, %eax # imm = 0x33333333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F
-; AVX-NOPOPCNT-NEXT: imull $16843009, %ecx, %eax # imm = 0x1010101
-; AVX-NOPOPCNT-NEXT: shrl $24, %eax
-; AVX-NOPOPCNT-NEXT: vmovd %xmm1, %ecx
-; AVX-NOPOPCNT-NEXT: movl %ecx, %edx
-; AVX-NOPOPCNT-NEXT: shrl %edx
-; AVX-NOPOPCNT-NEXT: andl $1431655765, %edx # imm = 0x55555555
-; AVX-NOPOPCNT-NEXT: subl %edx, %ecx
-; AVX-NOPOPCNT-NEXT: movl %ecx, %edx
-; AVX-NOPOPCNT-NEXT: andl $858993459, %edx # imm = 0x33333333
-; AVX-NOPOPCNT-NEXT: shrl $2, %ecx
-; AVX-NOPOPCNT-NEXT: andl $858993459, %ecx # imm = 0x33333333
-; AVX-NOPOPCNT-NEXT: addl %edx, %ecx
-; AVX-NOPOPCNT-NEXT: movl %ecx, %edx
-; AVX-NOPOPCNT-NEXT: shrl $4, %edx
-; AVX-NOPOPCNT-NEXT: addl %ecx, %edx
-; AVX-NOPOPCNT-NEXT: andl $252645135, %edx # imm = 0xF0F0F0F
-; AVX-NOPOPCNT-NEXT: imull $16843009, %edx, %ecx # imm = 0x1010101
-; AVX-NOPOPCNT-NEXT: shrl $24, %ecx
-; AVX-NOPOPCNT-NEXT: vmovd %ecx, %xmm2
-; AVX-NOPOPCNT-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrd $2, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $1431655765, %ecx # imm = 0x55555555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $858993459, %ecx # imm = 0x33333333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $858993459, %eax # imm = 0x33333333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F
-; AVX-NOPOPCNT-NEXT: imull $16843009, %ecx, %eax # imm = 0x1010101
-; AVX-NOPOPCNT-NEXT: shrl $24, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrd $3, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $1431655765, %ecx # imm = 0x55555555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $858993459, %ecx # imm = 0x33333333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $858993459, %eax # imm = 0x33333333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F
-; AVX-NOPOPCNT-NEXT: imull $16843009, %ecx, %eax # imm = 0x1010101
-; AVX-NOPOPCNT-NEXT: shrl $24, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrd $3, %eax, %xmm2, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $1431655765, %ecx # imm = 0x55555555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $858993459, %ecx # imm = 0x33333333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $858993459, %eax # imm = 0x33333333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F
-; AVX-NOPOPCNT-NEXT: imull $16843009, %ecx, %eax # imm = 0x1010101
-; AVX-NOPOPCNT-NEXT: shrl $24, %eax
-; AVX-NOPOPCNT-NEXT: vmovd %xmm0, %ecx
-; AVX-NOPOPCNT-NEXT: movl %ecx, %edx
-; AVX-NOPOPCNT-NEXT: shrl %edx
-; AVX-NOPOPCNT-NEXT: andl $1431655765, %edx # imm = 0x55555555
-; AVX-NOPOPCNT-NEXT: subl %edx, %ecx
-; AVX-NOPOPCNT-NEXT: movl %ecx, %edx
-; AVX-NOPOPCNT-NEXT: andl $858993459, %edx # imm = 0x33333333
-; AVX-NOPOPCNT-NEXT: shrl $2, %ecx
-; AVX-NOPOPCNT-NEXT: andl $858993459, %ecx # imm = 0x33333333
-; AVX-NOPOPCNT-NEXT: addl %edx, %ecx
-; AVX-NOPOPCNT-NEXT: movl %ecx, %edx
-; AVX-NOPOPCNT-NEXT: shrl $4, %edx
-; AVX-NOPOPCNT-NEXT: addl %ecx, %edx
-; AVX-NOPOPCNT-NEXT: andl $252645135, %edx # imm = 0xF0F0F0F
-; AVX-NOPOPCNT-NEXT: imull $16843009, %edx, %ecx # imm = 0x1010101
-; AVX-NOPOPCNT-NEXT: shrl $24, %ecx
-; AVX-NOPOPCNT-NEXT: vmovd %ecx, %xmm2
-; AVX-NOPOPCNT-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrd $2, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $1431655765, %ecx # imm = 0x55555555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $858993459, %ecx # imm = 0x33333333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $858993459, %eax # imm = 0x33333333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F
-; AVX-NOPOPCNT-NEXT: imull $16843009, %ecx, %eax # imm = 0x1010101
-; AVX-NOPOPCNT-NEXT: shrl $24, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrd $3, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $1431655765, %ecx # imm = 0x55555555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $858993459, %ecx # imm = 0x33333333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $858993459, %eax # imm = 0x33333333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F
-; AVX-NOPOPCNT-NEXT: imull $16843009, %ecx, %eax # imm = 0x1010101
-; AVX-NOPOPCNT-NEXT: shrl $24, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
-; AVX-NOPOPCNT-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NOPOPCNT-NEXT: retq
- %out = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %in)
- ret <8 x i32> %out
-}
-
-define <2 x i64> @testv2i64(<2 x i64> %in) {
-; AVX-POPCNT-LABEL: testv2i64:
-; AVX-POPCNT: # BB#0:
-; AVX-POPCNT-NEXT: vpextrq $1, %xmm0, %rax
-; AVX-POPCNT-NEXT: popcntq %rax, %rax
-; AVX-POPCNT-NEXT: vmovq %rax, %xmm1
-; AVX-POPCNT-NEXT: vmovq %xmm0, %rax
-; AVX-POPCNT-NEXT: popcntq %rax, %rax
-; AVX-POPCNT-NEXT: vmovq %rax, %xmm0
-; AVX-POPCNT-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX-POPCNT-NEXT: retq
-;
-; AVX-NOPOPCNT-LABEL: testv2i64:
-; AVX-NOPOPCNT: # BB#0:
-; AVX-NOPOPCNT-NEXT: vpsrlq $1, %xmm0, %xmm1
-; AVX-NOPOPCNT-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX-NOPOPCNT-NEXT: vpsubq %xmm1, %xmm0, %xmm0
-; AVX-NOPOPCNT-NEXT: vmovdqa {{.*#+}} xmm1 = [3689348814741910323,3689348814741910323]
-; AVX-NOPOPCNT-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX-NOPOPCNT-NEXT: vpsrlq $2, %xmm0, %xmm0
-; AVX-NOPOPCNT-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NOPOPCNT-NEXT: vpaddq %xmm0, %xmm2, %xmm0
-; AVX-NOPOPCNT-NEXT: vpsrlq $4, %xmm0, %xmm1
-; AVX-NOPOPCNT-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX-NOPOPCNT-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NOPOPCNT-NEXT: vpsrlq $8, %xmm0, %xmm1
-; AVX-NOPOPCNT-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX-NOPOPCNT-NEXT: vpsrlq $16, %xmm0, %xmm1
-; AVX-NOPOPCNT-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX-NOPOPCNT-NEXT: vpsrlq $32, %xmm0, %xmm1
-; AVX-NOPOPCNT-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX-NOPOPCNT-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NOPOPCNT-NEXT: retq
- %out = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %in)
- ret <2 x i64> %out
-}
-
-define <16 x i8> @testv16i8(<16 x i8> %in) {
-; AVX-POPCNT-LABEL: testv16i8:
-; AVX-POPCNT: # BB#0:
-; AVX-POPCNT-NEXT: vpextrb $1, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX-POPCNT-NEXT: popcntw %cx, %cx
-; AVX-POPCNT-NEXT: vmovd %ecx, %xmm1
-; AVX-POPCNT-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX-POPCNT-NEXT: vpextrb $2, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX-POPCNT-NEXT: vpextrb $3, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX-POPCNT-NEXT: vpextrb $4, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX-POPCNT-NEXT: vpextrb $5, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX-POPCNT-NEXT: vpextrb $6, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX-POPCNT-NEXT: vpextrb $7, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX-POPCNT-NEXT: vpextrb $8, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX-POPCNT-NEXT: vpextrb $9, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX-POPCNT-NEXT: vpextrb $10, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX-POPCNT-NEXT: vpextrb $11, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX-POPCNT-NEXT: vpextrb $12, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX-POPCNT-NEXT: vpextrb $13, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX-POPCNT-NEXT: vpextrb $14, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX-POPCNT-NEXT: vpextrb $15, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
-; AVX-POPCNT-NEXT: retq
-;
-; AVX-NOPOPCNT-LABEL: testv16i8:
-; AVX-NOPOPCNT: # BB#0:
-; AVX-NOPOPCNT-NEXT: vpextrb $1, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX-NOPOPCNT-NEXT: movb %cl, %dl
-; AVX-NOPOPCNT-NEXT: shrb %dl
-; AVX-NOPOPCNT-NEXT: andb $85, %dl
-; AVX-NOPOPCNT-NEXT: subb %dl, %cl
-; AVX-NOPOPCNT-NEXT: movb %cl, %dl
-; AVX-NOPOPCNT-NEXT: andb $51, %dl
-; AVX-NOPOPCNT-NEXT: shrb $2, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: addb %dl, %cl
-; AVX-NOPOPCNT-NEXT: movb %cl, %dl
-; AVX-NOPOPCNT-NEXT: shrb $4, %dl
-; AVX-NOPOPCNT-NEXT: addb %cl, %dl
-; AVX-NOPOPCNT-NEXT: andb $15, %dl
-; AVX-NOPOPCNT-NEXT: movzbl %dl, %ecx
-; AVX-NOPOPCNT-NEXT: vmovd %ecx, %xmm1
-; AVX-NOPOPCNT-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrb $2, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrb $3, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrb $4, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrb $5, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrb $6, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrb $7, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrb $8, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrb $9, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrb $10, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrb $11, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrb $12, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrb $13, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrb $14, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrb $15, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb %cl
-; AVX-NOPOPCNT-NEXT: andb $85, %cl
-; AVX-NOPOPCNT-NEXT: subb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $51, %cl
-; AVX-NOPOPCNT-NEXT: shrb $2, %al
-; AVX-NOPOPCNT-NEXT: andb $51, %al
-; AVX-NOPOPCNT-NEXT: addb %cl, %al
-; AVX-NOPOPCNT-NEXT: movb %al, %cl
-; AVX-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX-NOPOPCNT-NEXT: addb %al, %cl
-; AVX-NOPOPCNT-NEXT: andb $15, %cl
-; AVX-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX-NOPOPCNT-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
-; AVX-NOPOPCNT-NEXT: retq
- %out = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %in)
- ret <16 x i8> %out
-}
-
-define <16 x i16> @testv16i16(<16 x i16> %in) {
-; AVX-POPCNT-LABEL: testv16i16:
-; AVX-POPCNT: # BB#0:
-; AVX-POPCNT-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-POPCNT-NEXT: vpextrw $1, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vmovd %xmm1, %ecx
-; AVX-POPCNT-NEXT: popcntw %cx, %cx
-; AVX-POPCNT-NEXT: vmovd %ecx, %xmm2
-; AVX-POPCNT-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrw $2, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrw $3, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrw $4, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrw $5, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrw $6, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrw $7, %xmm1, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrw $7, %eax, %xmm2, %xmm1
-; AVX-POPCNT-NEXT: vpextrw $1, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vmovd %xmm0, %ecx
-; AVX-POPCNT-NEXT: popcntw %cx, %cx
-; AVX-POPCNT-NEXT: vmovd %ecx, %xmm2
-; AVX-POPCNT-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrw $2, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrw $3, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrw $4, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrw $5, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrw $6, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX-POPCNT-NEXT: vpextrw $7, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0
-; AVX-POPCNT-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-POPCNT-NEXT: retq
-;
-; AVX-NOPOPCNT-LABEL: testv16i16:
-; AVX-NOPOPCNT: # BB#0:
-; AVX-NOPOPCNT-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrw $1, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX-NOPOPCNT-NEXT: vmovd %xmm1, %ecx
-; AVX-NOPOPCNT-NEXT: movl %ecx, %edx
-; AVX-NOPOPCNT-NEXT: shrl %edx
-; AVX-NOPOPCNT-NEXT: andl $21845, %edx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %edx, %ecx
-; AVX-NOPOPCNT-NEXT: movl %ecx, %edx
-; AVX-NOPOPCNT-NEXT: andl $13107, %edx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %edx, %ecx
-; AVX-NOPOPCNT-NEXT: movl %ecx, %edx
-; AVX-NOPOPCNT-NEXT: andl $65520, %edx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %edx
-; AVX-NOPOPCNT-NEXT: addl %ecx, %edx
-; AVX-NOPOPCNT-NEXT: andl $3855, %edx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %edx, %ecx # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ch, %ecx # NOREX
-; AVX-NOPOPCNT-NEXT: vmovd %ecx, %xmm2
-; AVX-NOPOPCNT-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrw $2, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX-NOPOPCNT-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrw $3, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX-NOPOPCNT-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrw $4, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX-NOPOPCNT-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrw $5, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX-NOPOPCNT-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrw $6, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX-NOPOPCNT-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrw $7, %xmm1, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX-NOPOPCNT-NEXT: vpinsrw $7, %eax, %xmm2, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrw $1, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX-NOPOPCNT-NEXT: vmovd %xmm0, %ecx
-; AVX-NOPOPCNT-NEXT: movl %ecx, %edx
-; AVX-NOPOPCNT-NEXT: shrl %edx
-; AVX-NOPOPCNT-NEXT: andl $21845, %edx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %edx, %ecx
-; AVX-NOPOPCNT-NEXT: movl %ecx, %edx
-; AVX-NOPOPCNT-NEXT: andl $13107, %edx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %edx, %ecx
-; AVX-NOPOPCNT-NEXT: movl %ecx, %edx
-; AVX-NOPOPCNT-NEXT: andl $65520, %edx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %edx
-; AVX-NOPOPCNT-NEXT: addl %ecx, %edx
-; AVX-NOPOPCNT-NEXT: andl $3855, %edx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %edx, %ecx # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ch, %ecx # NOREX
-; AVX-NOPOPCNT-NEXT: vmovd %ecx, %xmm2
-; AVX-NOPOPCNT-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrw $2, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX-NOPOPCNT-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrw $3, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX-NOPOPCNT-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrw $4, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX-NOPOPCNT-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrw $5, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX-NOPOPCNT-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrw $6, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX-NOPOPCNT-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX-NOPOPCNT-NEXT: vpextrw $7, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX-NOPOPCNT-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0
-; AVX-NOPOPCNT-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NOPOPCNT-NEXT: retq
- %out = call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> %in)
- ret <16 x i16> %out
-}
-
-define <8 x i16> @testv8i16(<8 x i16> %in) {
-; AVX-POPCNT-LABEL: testv8i16:
-; AVX-POPCNT: # BB#0:
-; AVX-POPCNT-NEXT: vpextrw $1, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vmovd %xmm0, %ecx
-; AVX-POPCNT-NEXT: popcntw %cx, %cx
-; AVX-POPCNT-NEXT: vmovd %ecx, %xmm1
-; AVX-POPCNT-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
-; AVX-POPCNT-NEXT: vpextrw $2, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
-; AVX-POPCNT-NEXT: vpextrw $3, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1
-; AVX-POPCNT-NEXT: vpextrw $4, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
-; AVX-POPCNT-NEXT: vpextrw $5, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1
-; AVX-POPCNT-NEXT: vpextrw $6, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
-; AVX-POPCNT-NEXT: vpextrw $7, %xmm0, %eax
-; AVX-POPCNT-NEXT: popcntw %ax, %ax
-; AVX-POPCNT-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
-; AVX-POPCNT-NEXT: retq
-;
-; AVX-NOPOPCNT-LABEL: testv8i16:
-; AVX-NOPOPCNT: # BB#0:
-; AVX-NOPOPCNT-NEXT: vpextrw $1, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX-NOPOPCNT-NEXT: vmovd %xmm0, %ecx
-; AVX-NOPOPCNT-NEXT: movl %ecx, %edx
-; AVX-NOPOPCNT-NEXT: shrl %edx
-; AVX-NOPOPCNT-NEXT: andl $21845, %edx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %edx, %ecx
-; AVX-NOPOPCNT-NEXT: movl %ecx, %edx
-; AVX-NOPOPCNT-NEXT: andl $13107, %edx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %edx, %ecx
-; AVX-NOPOPCNT-NEXT: movl %ecx, %edx
-; AVX-NOPOPCNT-NEXT: andl $65520, %edx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %edx
-; AVX-NOPOPCNT-NEXT: addl %ecx, %edx
-; AVX-NOPOPCNT-NEXT: andl $3855, %edx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %edx, %ecx # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ch, %ecx # NOREX
-; AVX-NOPOPCNT-NEXT: vmovd %ecx, %xmm1
-; AVX-NOPOPCNT-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrw $2, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX-NOPOPCNT-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrw $3, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX-NOPOPCNT-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrw $4, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX-NOPOPCNT-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrw $5, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX-NOPOPCNT-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrw $6, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX-NOPOPCNT-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
-; AVX-NOPOPCNT-NEXT: vpextrw $7, %xmm0, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: shrl %ecx
-; AVX-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX-NOPOPCNT-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
-; AVX-NOPOPCNT-NEXT: retq
- %out = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %in)
- ret <8 x i16> %out
-}
-
-declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>)
-declare <32 x i8> @llvm.ctpop.v32i8(<32 x i8>)
-declare <4 x i64> @llvm.ctpop.v4i64(<4 x i64>)
-declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>)
-declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
-declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>)
-declare <16 x i16> @llvm.ctpop.v16i16(<16 x i16>)
-declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16>)
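For reference, every AVX-NOPOPCNT check in the file above expands each 16-bit element with the same scalar bit-twiddling sequence: mask with 0x5555 and 0x3333, fold nibbles, mask with 0xF0F, multiply by 0x101 and take the high byte. A minimal C sketch of that per-element sequence, assuming the element has been zero-extended into an unsigned int (the function name is illustrative, not part of the test):

    /* Classic SWAR population count of one 16-bit value, mirroring the
     * constants in the scalarized checks: 0x5555, 0x3333, 0x0F0F, 0x101. */
    static unsigned popcount16(unsigned x) {
        x -= (x >> 1) & 0x5555;                  /* 2-bit partial counts */
        x = (x & 0x3333) + ((x >> 2) & 0x3333);  /* 4-bit partial counts */
        x = (x + (x >> 4)) & 0x0F0F;             /* 8-bit partial counts */
        return ((x * 0x0101) >> 8) & 0xFF;       /* sum the two bytes    */
    }
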
+++ /dev/null
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 -mattr=+popcnt | FileCheck --check-prefix=AVX2 --check-prefix=AVX2-POPCNT %s
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 -mattr=-popcnt | FileCheck --check-prefix=AVX2 --check-prefix=AVX2-NOPOPCNT %s
-
-; When avx2 is enabled, we should always generate the same code regardless
-; of popcnt instruction availability.
-
-define <32 x i8> @testv32i8(<32 x i8> %in) {
-; AVX2-POPCNT-LABEL: testv32i8:
-; AVX2-POPCNT: # BB#0:
-; AVX2-POPCNT-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-POPCNT-NEXT: vpextrb $1, %xmm1, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpextrb $0, %xmm1, %ecx
-; AVX2-POPCNT-NEXT: popcntw %cx, %cx
-; AVX2-POPCNT-NEXT: vmovd %ecx, %xmm2
-; AVX2-POPCNT-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $2, %xmm1, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $3, %xmm1, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $4, %xmm1, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $5, %xmm1, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $6, %xmm1, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $7, %xmm1, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $8, %xmm1, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $9, %xmm1, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $10, %xmm1, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $11, %xmm1, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $12, %xmm1, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $13, %xmm1, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $14, %xmm1, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $15, %xmm1, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
-; AVX2-POPCNT-NEXT: vpextrb $1, %xmm0, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX2-POPCNT-NEXT: popcntw %cx, %cx
-; AVX2-POPCNT-NEXT: vmovd %ecx, %xmm2
-; AVX2-POPCNT-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $2, %xmm0, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $3, %xmm0, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $4, %xmm0, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $5, %xmm0, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $6, %xmm0, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $7, %xmm0, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $8, %xmm0, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $9, %xmm0, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $10, %xmm0, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $11, %xmm0, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $12, %xmm0, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $13, %xmm0, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $14, %xmm0, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrb $15, %xmm0, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
-; AVX2-POPCNT-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-POPCNT-NEXT: retq
-;
-; AVX2-NOPOPCNT-LABEL: testv32i8:
-; AVX2-NOPOPCNT: # BB#0:
-; AVX2-NOPOPCNT-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NOPOPCNT-NEXT: vpextrb $1, %xmm1, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpextrb $0, %xmm1, %ecx
-; AVX2-NOPOPCNT-NEXT: movb %cl, %dl
-; AVX2-NOPOPCNT-NEXT: shrb %dl
-; AVX2-NOPOPCNT-NEXT: andb $85, %dl
-; AVX2-NOPOPCNT-NEXT: subb %dl, %cl
-; AVX2-NOPOPCNT-NEXT: movb %cl, %dl
-; AVX2-NOPOPCNT-NEXT: andb $51, %dl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: addb %dl, %cl
-; AVX2-NOPOPCNT-NEXT: movb %cl, %dl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %dl
-; AVX2-NOPOPCNT-NEXT: addb %cl, %dl
-; AVX2-NOPOPCNT-NEXT: andb $15, %dl
-; AVX2-NOPOPCNT-NEXT: movzbl %dl, %ecx
-; AVX2-NOPOPCNT-NEXT: vmovd %ecx, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $2, %xmm1, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $3, %xmm1, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $4, %xmm1, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $5, %xmm1, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $6, %xmm1, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $7, %xmm1, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $8, %xmm1, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $9, %xmm1, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $10, %xmm1, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $11, %xmm1, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $12, %xmm1, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $13, %xmm1, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $14, %xmm1, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $15, %xmm1, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
-; AVX2-NOPOPCNT-NEXT: vpextrb $1, %xmm0, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX2-NOPOPCNT-NEXT: movb %cl, %dl
-; AVX2-NOPOPCNT-NEXT: shrb %dl
-; AVX2-NOPOPCNT-NEXT: andb $85, %dl
-; AVX2-NOPOPCNT-NEXT: subb %dl, %cl
-; AVX2-NOPOPCNT-NEXT: movb %cl, %dl
-; AVX2-NOPOPCNT-NEXT: andb $51, %dl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: addb %dl, %cl
-; AVX2-NOPOPCNT-NEXT: movb %cl, %dl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %dl
-; AVX2-NOPOPCNT-NEXT: addb %cl, %dl
-; AVX2-NOPOPCNT-NEXT: andb $15, %dl
-; AVX2-NOPOPCNT-NEXT: movzbl %dl, %ecx
-; AVX2-NOPOPCNT-NEXT: vmovd %ecx, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $2, %xmm0, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $3, %xmm0, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $4, %xmm0, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $5, %xmm0, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $6, %xmm0, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $7, %xmm0, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $8, %xmm0, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $9, %xmm0, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $10, %xmm0, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $11, %xmm0, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $12, %xmm0, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $13, %xmm0, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $14, %xmm0, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrb $15, %xmm0, %eax
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb %cl
-; AVX2-NOPOPCNT-NEXT: andb $85, %cl
-; AVX2-NOPOPCNT-NEXT: subb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $51, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $2, %al
-; AVX2-NOPOPCNT-NEXT: andb $51, %al
-; AVX2-NOPOPCNT-NEXT: addb %cl, %al
-; AVX2-NOPOPCNT-NEXT: movb %al, %cl
-; AVX2-NOPOPCNT-NEXT: shrb $4, %cl
-; AVX2-NOPOPCNT-NEXT: addb %al, %cl
-; AVX2-NOPOPCNT-NEXT: andb $15, %cl
-; AVX2-NOPOPCNT-NEXT: movzbl %cl, %eax
-; AVX2-NOPOPCNT-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
-; AVX2-NOPOPCNT-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NOPOPCNT-NEXT: retq
- %out = call <32 x i8> @llvm.ctpop.v32i8(<32 x i8> %in)
- ret <32 x i8> %out
-}
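
The testv32i8 checks above use the byte-wide form of the same idea, with 0x55, 0x33, and 0x0F masks and no multiply, since a single byte already holds the final count. A minimal C sketch, again with an illustrative name:

    /* Per-byte SWAR population count matching the andb $85/$51/$15 sequence. */
    static unsigned char popcount8(unsigned char x) {
        x -= (x >> 1) & 0x55;                /* 2-bit partial counts   */
        x = (x & 0x33) + ((x >> 2) & 0x33);  /* 4-bit partial counts   */
        x = (x + (x >> 4)) & 0x0F;           /* final count, at most 8 */
        return x;
    }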
-
-define <4 x i64> @testv4i64(<4 x i64> %in) {
-; AVX2-POPCNT-LABEL: testv4i64:
-; AVX2-POPCNT: # BB#0:
-; AVX2-POPCNT-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-POPCNT-NEXT: vpextrq $1, %xmm1, %rax
-; AVX2-POPCNT-NEXT: popcntq %rax, %rax
-; AVX2-POPCNT-NEXT: vmovq %rax, %xmm2
-; AVX2-POPCNT-NEXT: vmovq %xmm1, %rax
-; AVX2-POPCNT-NEXT: popcntq %rax, %rax
-; AVX2-POPCNT-NEXT: vmovq %rax, %xmm1
-; AVX2-POPCNT-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX2-POPCNT-NEXT: vpextrq $1, %xmm0, %rax
-; AVX2-POPCNT-NEXT: popcntq %rax, %rax
-; AVX2-POPCNT-NEXT: vmovq %rax, %xmm2
-; AVX2-POPCNT-NEXT: vmovq %xmm0, %rax
-; AVX2-POPCNT-NEXT: popcntq %rax, %rax
-; AVX2-POPCNT-NEXT: vmovq %rax, %xmm0
-; AVX2-POPCNT-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX2-POPCNT-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-POPCNT-NEXT: retq
-;
-; AVX2-NOPOPCNT-LABEL: testv4i64:
-; AVX2-NOPOPCNT: # BB#0:
-; AVX2-NOPOPCNT-NEXT: vpsrlq $1, %ymm0, %ymm1
-; AVX2-NOPOPCNT-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
-; AVX2-NOPOPCNT-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX2-NOPOPCNT-NEXT: vpsubq %ymm1, %ymm0, %ymm0
-; AVX2-NOPOPCNT-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
-; AVX2-NOPOPCNT-NEXT: vpand %ymm1, %ymm0, %ymm2
-; AVX2-NOPOPCNT-NEXT: vpsrlq $2, %ymm0, %ymm0
-; AVX2-NOPOPCNT-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NOPOPCNT-NEXT: vpaddq %ymm0, %ymm2, %ymm0
-; AVX2-NOPOPCNT-NEXT: vpsrlq $4, %ymm0, %ymm1
-; AVX2-NOPOPCNT-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX2-NOPOPCNT-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
-; AVX2-NOPOPCNT-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NOPOPCNT-NEXT: vpsrlq $8, %ymm0, %ymm1
-; AVX2-NOPOPCNT-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX2-NOPOPCNT-NEXT: vpsrlq $16, %ymm0, %ymm1
-; AVX2-NOPOPCNT-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX2-NOPOPCNT-NEXT: vpsrlq $32, %ymm0, %ymm1
-; AVX2-NOPOPCNT-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX2-NOPOPCNT-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
-; AVX2-NOPOPCNT-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NOPOPCNT-NEXT: retq
- %out = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %in)
- ret <4 x i64> %out
-}
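
The v4i64 path above stays vectorized: the same 0x55/0x33/0x0F masking is done with broadcast constants, and the per-lane byte counts are then summed with a chain of shifts and adds instead of a multiply. A per-lane C sketch for the 64-bit case (the broadcast constants are elided as {{.*}}(%rip) in the checks, so the exact final mask is not shown; 0x7F suffices since the count fits in 7 bits):

    /* Shift-and-add SWAR popcount of one 64-bit lane, mirroring the
     * vpsrlq $8/$16/$32 + vpaddq accumulation in the checks above. */
    static unsigned popcount64(unsigned long long x) {
        x -= (x >> 1) & 0x5555555555555555ULL;
        x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL);
        x = (x + (x >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
        x += x >> 8;                 /* accumulate byte counts */
        x += x >> 16;
        x += x >> 32;
        return (unsigned)(x & 0x7F); /* 64-bit count fits in 7 bits */
    }

The v8i32 function that follows uses the same pattern per 32-bit lane, just without the final 32-bit shift.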
-
-define <8 x i32> @testv8i32(<8 x i32> %in) {
-; AVX2-LABEL: testv8i32:
-; AVX2: # BB#0:
-; AVX2-NEXT: vpsrld $1, %ymm0, %ymm1
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2
-; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
-; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vpsrld $2, %ymm0, %ymm0
-; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpaddd %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpsrld $4, %ymm0, %ymm1
-; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
-; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $8, %ymm0, %ymm1
-; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $16, %ymm0, %ymm1
-; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
-; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: retq
- %out = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %in)
- ret <8 x i32> %out
-}
-
-define <16 x i16> @testv16i16(<16 x i16> %in) {
-; AVX2-POPCNT-LABEL: testv16i16:
-; AVX2-POPCNT: # BB#0:
-; AVX2-POPCNT-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-POPCNT-NEXT: vpextrw $1, %xmm1, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vmovd %xmm1, %ecx
-; AVX2-POPCNT-NEXT: popcntw %cx, %cx
-; AVX2-POPCNT-NEXT: vmovd %ecx, %xmm2
-; AVX2-POPCNT-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrw $2, %xmm1, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrw $3, %xmm1, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrw $4, %xmm1, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrw $5, %xmm1, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrw $6, %xmm1, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrw $7, %xmm1, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrw $7, %eax, %xmm2, %xmm1
-; AVX2-POPCNT-NEXT: vpextrw $1, %xmm0, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vmovd %xmm0, %ecx
-; AVX2-POPCNT-NEXT: popcntw %cx, %cx
-; AVX2-POPCNT-NEXT: vmovd %ecx, %xmm2
-; AVX2-POPCNT-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrw $2, %xmm0, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrw $3, %xmm0, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrw $4, %xmm0, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrw $5, %xmm0, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrw $6, %xmm0, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX2-POPCNT-NEXT: vpextrw $7, %xmm0, %eax
-; AVX2-POPCNT-NEXT: popcntw %ax, %ax
-; AVX2-POPCNT-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0
-; AVX2-POPCNT-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-POPCNT-NEXT: retq
-;
-; AVX2-NOPOPCNT-LABEL: testv16i16:
-; AVX2-NOPOPCNT: # BB#0:
-; AVX2-NOPOPCNT-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NOPOPCNT-NEXT: vpextrw $1, %xmm1, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: shrl %ecx
-; AVX2-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX2-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX2-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX2-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX2-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX2-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX2-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX2-NOPOPCNT-NEXT: vmovd %xmm1, %ecx
-; AVX2-NOPOPCNT-NEXT: movl %ecx, %edx
-; AVX2-NOPOPCNT-NEXT: shrl %edx
-; AVX2-NOPOPCNT-NEXT: andl $21845, %edx # imm = 0x5555
-; AVX2-NOPOPCNT-NEXT: subl %edx, %ecx
-; AVX2-NOPOPCNT-NEXT: movl %ecx, %edx
-; AVX2-NOPOPCNT-NEXT: andl $13107, %edx # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: shrl $2, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: addl %edx, %ecx
-; AVX2-NOPOPCNT-NEXT: movl %ecx, %edx
-; AVX2-NOPOPCNT-NEXT: andl $65520, %edx # imm = 0xFFF0
-; AVX2-NOPOPCNT-NEXT: shrl $4, %edx
-; AVX2-NOPOPCNT-NEXT: addl %ecx, %edx
-; AVX2-NOPOPCNT-NEXT: andl $3855, %edx # imm = 0xF0F
-; AVX2-NOPOPCNT-NEXT: imull $257, %edx, %ecx # imm = 0x101
-; AVX2-NOPOPCNT-NEXT: movzbl %ch, %ecx # NOREX
-; AVX2-NOPOPCNT-NEXT: vmovd %ecx, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrw $2, %xmm1, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: shrl %ecx
-; AVX2-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX2-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX2-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX2-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX2-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX2-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX2-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX2-NOPOPCNT-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrw $3, %xmm1, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: shrl %ecx
-; AVX2-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX2-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX2-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX2-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX2-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX2-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX2-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX2-NOPOPCNT-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrw $4, %xmm1, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: shrl %ecx
-; AVX2-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX2-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX2-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX2-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX2-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX2-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX2-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX2-NOPOPCNT-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrw $5, %xmm1, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: shrl %ecx
-; AVX2-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX2-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX2-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX2-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX2-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX2-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX2-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX2-NOPOPCNT-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrw $6, %xmm1, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: shrl %ecx
-; AVX2-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX2-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX2-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX2-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX2-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX2-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX2-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX2-NOPOPCNT-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrw $7, %xmm1, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: shrl %ecx
-; AVX2-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX2-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX2-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX2-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX2-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX2-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX2-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX2-NOPOPCNT-NEXT: vpinsrw $7, %eax, %xmm2, %xmm1
-; AVX2-NOPOPCNT-NEXT: vpextrw $1, %xmm0, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: shrl %ecx
-; AVX2-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX2-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX2-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX2-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX2-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX2-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX2-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX2-NOPOPCNT-NEXT: vmovd %xmm0, %ecx
-; AVX2-NOPOPCNT-NEXT: movl %ecx, %edx
-; AVX2-NOPOPCNT-NEXT: shrl %edx
-; AVX2-NOPOPCNT-NEXT: andl $21845, %edx # imm = 0x5555
-; AVX2-NOPOPCNT-NEXT: subl %edx, %ecx
-; AVX2-NOPOPCNT-NEXT: movl %ecx, %edx
-; AVX2-NOPOPCNT-NEXT: andl $13107, %edx # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: shrl $2, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: addl %edx, %ecx
-; AVX2-NOPOPCNT-NEXT: movl %ecx, %edx
-; AVX2-NOPOPCNT-NEXT: andl $65520, %edx # imm = 0xFFF0
-; AVX2-NOPOPCNT-NEXT: shrl $4, %edx
-; AVX2-NOPOPCNT-NEXT: addl %ecx, %edx
-; AVX2-NOPOPCNT-NEXT: andl $3855, %edx # imm = 0xF0F
-; AVX2-NOPOPCNT-NEXT: imull $257, %edx, %ecx # imm = 0x101
-; AVX2-NOPOPCNT-NEXT: movzbl %ch, %ecx # NOREX
-; AVX2-NOPOPCNT-NEXT: vmovd %ecx, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrw $2, %xmm0, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: shrl %ecx
-; AVX2-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX2-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX2-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX2-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX2-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX2-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX2-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX2-NOPOPCNT-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrw $3, %xmm0, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: shrl %ecx
-; AVX2-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX2-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX2-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX2-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX2-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX2-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX2-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX2-NOPOPCNT-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrw $4, %xmm0, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: shrl %ecx
-; AVX2-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX2-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX2-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX2-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX2-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX2-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX2-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX2-NOPOPCNT-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrw $5, %xmm0, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: shrl %ecx
-; AVX2-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX2-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX2-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX2-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX2-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX2-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX2-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX2-NOPOPCNT-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrw $6, %xmm0, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: shrl %ecx
-; AVX2-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX2-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX2-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX2-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX2-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX2-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX2-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX2-NOPOPCNT-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX2-NOPOPCNT-NEXT: vpextrw $7, %xmm0, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: shrl %ecx
-; AVX2-NOPOPCNT-NEXT: andl $21845, %ecx # imm = 0x5555
-; AVX2-NOPOPCNT-NEXT: subl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $13107, %ecx # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: shrl $2, %eax
-; AVX2-NOPOPCNT-NEXT: andl $13107, %eax # imm = 0x3333
-; AVX2-NOPOPCNT-NEXT: addl %ecx, %eax
-; AVX2-NOPOPCNT-NEXT: movl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $65520, %ecx # imm = 0xFFF0
-; AVX2-NOPOPCNT-NEXT: shrl $4, %ecx
-; AVX2-NOPOPCNT-NEXT: addl %eax, %ecx
-; AVX2-NOPOPCNT-NEXT: andl $3855, %ecx # imm = 0xF0F
-; AVX2-NOPOPCNT-NEXT: imull $257, %ecx, %eax # imm = 0x101
-; AVX2-NOPOPCNT-NEXT: movzbl %ah, %eax # NOREX
-; AVX2-NOPOPCNT-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0
-; AVX2-NOPOPCNT-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NOPOPCNT-NEXT: retq
- %out = call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> %in)
- ret <16 x i16> %out
-}
-
-declare <32 x i8> @llvm.ctpop.v32i8(<32 x i8>)
-declare <4 x i64> @llvm.ctpop.v4i64(<4 x i64>)
-declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>)
-declare <16 x i16> @llvm.ctpop.v16i16(<16 x i16>)
--- /dev/null
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+
+target triple = "x86_64-unknown-unknown"
+
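+; The functions below exercise lowering of the llvm.ctpop.* intrinsics for the
+; 128-bit vector types (v2i64, v4i32, v8i16, v16i8) at each feature level
+; selected by the RUN lines: SSE2, SSE3, SSSE3, SSE4.1, AVX and AVX2. The
+; i64/i32 element cases are handled with in-register bit manipulation, while
+; the i16/i8 element cases are scalarized one element at a time.
+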
+define <2 x i64> @testv2i64(<2 x i64> %in) {
+; SSE-LABEL: testv2i64:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrlq $1, %xmm1
+; SSE-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE-NEXT: psubq %xmm1, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [3689348814741910323,3689348814741910323]
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: psrlq $2, %xmm0
+; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: paddq %xmm2, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrlq $4, %xmm1
+; SSE-NEXT: paddq %xmm0, %xmm1
+; SSE-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: psrlq $8, %xmm0
+; SSE-NEXT: paddq %xmm1, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrlq $16, %xmm1
+; SSE-NEXT: paddq %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: psrlq $32, %xmm0
+; SSE-NEXT: paddq %xmm1, %xmm0
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: testv2i64:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrlq $1, %xmm0, %xmm1
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [3689348814741910323,3689348814741910323]
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpsrlq $2, %xmm0, %xmm0
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX-NEXT: vpsrlq $4, %xmm0, %xmm1
+; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpsrlq $8, %xmm0, %xmm1
+; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlq $16, %xmm0, %xmm1
+; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlq $32, %xmm0, %xmm1
+; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %out = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %in)
+ ret <2 x i64> %out
+}
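+
+; The expansion above is the classic bit-parallel (SWAR) popcount, performed
+; entirely in vector registers:
+;   x = x - ((x >> 1) & 0x5555...);                  2-bit partial counts
+;   x = (x & 0x3333...) + ((x >> 2) & 0x3333...);    4-bit partial counts
+;   x = (x + (x >> 4)) & 0x0F0F...;                  per-byte counts
+; The visible constant 3689348814741910323 is 0x3333333333333333; the masks
+; behind the {{.*}}(%rip) loads are presumably the matching 0x55... and 0x0F...
+; patterns. The trailing shift/add steps by 8, 16 and 32 accumulate the
+; per-byte counts into the low byte of each i64 element before the final mask.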
+
+define <4 x i32> @testv4i32(<4 x i32> %in) {
+; SSE-LABEL: testv4i32:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrld $1, %xmm1
+; SSE-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE-NEXT: psubd %xmm1, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [858993459,858993459,858993459,858993459]
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: psrld $2, %xmm0
+; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrld $4, %xmm1
+; SSE-NEXT: paddd %xmm0, %xmm1
+; SSE-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: psrld $8, %xmm2
+; SSE-NEXT: paddd %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: psrld $16, %xmm0
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: testv4i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm1
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [858993459,858993459,858993459,858993459]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpsrld $2, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpsrld $4, %xmm0, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $8, %xmm0, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv4i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsrld $1, %xmm0, %xmm1
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpsrld $2, %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpaddd %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: vpsrld $4, %xmm0, %xmm1
+; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $8, %xmm0, %xmm1
+; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+ %out = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %in)
+ ret <4 x i32> %out
+}
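+
+; For v4i32 the AVX2 path differs from AVX1 only in how the masks are
+; materialized: vpbroadcastd splats each 32-bit constant from the constant
+; pool instead of loading a full 128-bit vector constant.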
+
+define <8 x i16> @testv8i16(<8 x i16> %in) {
+; SSE2-LABEL: testv8i16:
+; SSE2: # BB#0:
+; SSE2-NEXT: pextrw $7, %xmm0, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: shrl %ecx
+; SSE2-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE2-NEXT: subl %ecx, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE2-NEXT: shrl $2, %eax
+; SSE2-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE2-NEXT: addl %ecx, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE2-NEXT: shrl $4, %ecx
+; SSE2-NEXT: addl %eax, %ecx
+; SSE2-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE2-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE2-NEXT: movzbl %ah, %eax # NOREX
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pextrw $3, %xmm0, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: shrl %ecx
+; SSE2-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE2-NEXT: subl %ecx, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE2-NEXT: shrl $2, %eax
+; SSE2-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE2-NEXT: addl %ecx, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE2-NEXT: shrl $4, %ecx
+; SSE2-NEXT: addl %eax, %ecx
+; SSE2-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE2-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE2-NEXT: movzbl %ah, %eax # NOREX
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-NEXT: pextrw $5, %xmm0, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: shrl %ecx
+; SSE2-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE2-NEXT: subl %ecx, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE2-NEXT: shrl $2, %eax
+; SSE2-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE2-NEXT: addl %ecx, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE2-NEXT: shrl $4, %ecx
+; SSE2-NEXT: addl %eax, %ecx
+; SSE2-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE2-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE2-NEXT: movzbl %ah, %eax # NOREX
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: pextrw $1, %xmm0, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: shrl %ecx
+; SSE2-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE2-NEXT: subl %ecx, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE2-NEXT: shrl $2, %eax
+; SSE2-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE2-NEXT: addl %ecx, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE2-NEXT: shrl $4, %ecx
+; SSE2-NEXT: addl %eax, %ecx
+; SSE2-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE2-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE2-NEXT: movzbl %ah, %eax # NOREX
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: pextrw $6, %xmm0, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: shrl %ecx
+; SSE2-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE2-NEXT: subl %ecx, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE2-NEXT: shrl $2, %eax
+; SSE2-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE2-NEXT: addl %ecx, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE2-NEXT: shrl $4, %ecx
+; SSE2-NEXT: addl %eax, %ecx
+; SSE2-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE2-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE2-NEXT: movzbl %ah, %eax # NOREX
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: pextrw $2, %xmm0, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: shrl %ecx
+; SSE2-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE2-NEXT: subl %ecx, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE2-NEXT: shrl $2, %eax
+; SSE2-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE2-NEXT: addl %ecx, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE2-NEXT: shrl $4, %ecx
+; SSE2-NEXT: addl %eax, %ecx
+; SSE2-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE2-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE2-NEXT: movzbl %ah, %eax # NOREX
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; SSE2-NEXT: pextrw $4, %xmm0, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: shrl %ecx
+; SSE2-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE2-NEXT: subl %ecx, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE2-NEXT: shrl $2, %eax
+; SSE2-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE2-NEXT: addl %ecx, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE2-NEXT: shrl $4, %ecx
+; SSE2-NEXT: addl %eax, %ecx
+; SSE2-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE2-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE2-NEXT: movzbl %ah, %eax # NOREX
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: shrl %ecx
+; SSE2-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE2-NEXT: subl %ecx, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE2-NEXT: shrl $2, %eax
+; SSE2-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE2-NEXT: addl %ecx, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE2-NEXT: shrl $4, %ecx
+; SSE2-NEXT: addl %eax, %ecx
+; SSE2-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE2-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE2-NEXT: movzbl %ah, %eax # NOREX
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: testv8i16:
+; SSE3: # BB#0:
+; SSE3-NEXT: pextrw $7, %xmm0, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: shrl %ecx
+; SSE3-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE3-NEXT: subl %ecx, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE3-NEXT: shrl $2, %eax
+; SSE3-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE3-NEXT: addl %ecx, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE3-NEXT: shrl $4, %ecx
+; SSE3-NEXT: addl %eax, %ecx
+; SSE3-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE3-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE3-NEXT: movzbl %ah, %eax # NOREX
+; SSE3-NEXT: movd %eax, %xmm1
+; SSE3-NEXT: pextrw $3, %xmm0, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: shrl %ecx
+; SSE3-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE3-NEXT: subl %ecx, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE3-NEXT: shrl $2, %eax
+; SSE3-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE3-NEXT: addl %ecx, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE3-NEXT: shrl $4, %ecx
+; SSE3-NEXT: addl %eax, %ecx
+; SSE3-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE3-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE3-NEXT: movzbl %ah, %eax # NOREX
+; SSE3-NEXT: movd %eax, %xmm2
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE3-NEXT: pextrw $5, %xmm0, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: shrl %ecx
+; SSE3-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE3-NEXT: subl %ecx, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE3-NEXT: shrl $2, %eax
+; SSE3-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE3-NEXT: addl %ecx, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE3-NEXT: shrl $4, %ecx
+; SSE3-NEXT: addl %eax, %ecx
+; SSE3-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE3-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE3-NEXT: movzbl %ah, %eax # NOREX
+; SSE3-NEXT: movd %eax, %xmm3
+; SSE3-NEXT: pextrw $1, %xmm0, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: shrl %ecx
+; SSE3-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE3-NEXT: subl %ecx, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE3-NEXT: shrl $2, %eax
+; SSE3-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE3-NEXT: addl %ecx, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE3-NEXT: shrl $4, %ecx
+; SSE3-NEXT: addl %eax, %ecx
+; SSE3-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE3-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE3-NEXT: movzbl %ah, %eax # NOREX
+; SSE3-NEXT: movd %eax, %xmm1
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE3-NEXT: pextrw $6, %xmm0, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: shrl %ecx
+; SSE3-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE3-NEXT: subl %ecx, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE3-NEXT: shrl $2, %eax
+; SSE3-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE3-NEXT: addl %ecx, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE3-NEXT: shrl $4, %ecx
+; SSE3-NEXT: addl %eax, %ecx
+; SSE3-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE3-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE3-NEXT: movzbl %ah, %eax # NOREX
+; SSE3-NEXT: movd %eax, %xmm3
+; SSE3-NEXT: pextrw $2, %xmm0, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: shrl %ecx
+; SSE3-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE3-NEXT: subl %ecx, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE3-NEXT: shrl $2, %eax
+; SSE3-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE3-NEXT: addl %ecx, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE3-NEXT: shrl $4, %ecx
+; SSE3-NEXT: addl %eax, %ecx
+; SSE3-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE3-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE3-NEXT: movzbl %ah, %eax # NOREX
+; SSE3-NEXT: movd %eax, %xmm2
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; SSE3-NEXT: pextrw $4, %xmm0, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: shrl %ecx
+; SSE3-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE3-NEXT: subl %ecx, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE3-NEXT: shrl $2, %eax
+; SSE3-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE3-NEXT: addl %ecx, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE3-NEXT: shrl $4, %ecx
+; SSE3-NEXT: addl %eax, %ecx
+; SSE3-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE3-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE3-NEXT: movzbl %ah, %eax # NOREX
+; SSE3-NEXT: movd %eax, %xmm3
+; SSE3-NEXT: movd %xmm0, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: shrl %ecx
+; SSE3-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE3-NEXT: subl %ecx, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE3-NEXT: shrl $2, %eax
+; SSE3-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE3-NEXT: addl %ecx, %eax
+; SSE3-NEXT: movl %eax, %ecx
+; SSE3-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE3-NEXT: shrl $4, %ecx
+; SSE3-NEXT: addl %eax, %ecx
+; SSE3-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE3-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE3-NEXT: movzbl %ah, %eax # NOREX
+; SSE3-NEXT: movd %eax, %xmm0
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: testv8i16:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: pextrw $7, %xmm0, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: shrl %ecx
+; SSSE3-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSSE3-NEXT: subl %ecx, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSSE3-NEXT: shrl $2, %eax
+; SSSE3-NEXT: andl $13107, %eax # imm = 0x3333
+; SSSE3-NEXT: addl %ecx, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSSE3-NEXT: shrl $4, %ecx
+; SSSE3-NEXT: addl %eax, %ecx
+; SSSE3-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSSE3-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSSE3-NEXT: movzbl %ah, %eax # NOREX
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: pextrw $3, %xmm0, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: shrl %ecx
+; SSSE3-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSSE3-NEXT: subl %ecx, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSSE3-NEXT: shrl $2, %eax
+; SSSE3-NEXT: andl $13107, %eax # imm = 0x3333
+; SSSE3-NEXT: addl %ecx, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSSE3-NEXT: shrl $4, %ecx
+; SSSE3-NEXT: addl %eax, %ecx
+; SSSE3-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSSE3-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSSE3-NEXT: movzbl %ah, %eax # NOREX
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSSE3-NEXT: pextrw $5, %xmm0, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: shrl %ecx
+; SSSE3-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSSE3-NEXT: subl %ecx, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSSE3-NEXT: shrl $2, %eax
+; SSSE3-NEXT: andl $13107, %eax # imm = 0x3333
+; SSSE3-NEXT: addl %ecx, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSSE3-NEXT: shrl $4, %ecx
+; SSSE3-NEXT: addl %eax, %ecx
+; SSSE3-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSSE3-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSSE3-NEXT: movzbl %ah, %eax # NOREX
+; SSSE3-NEXT: movd %eax, %xmm3
+; SSSE3-NEXT: pextrw $1, %xmm0, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: shrl %ecx
+; SSSE3-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSSE3-NEXT: subl %ecx, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSSE3-NEXT: shrl $2, %eax
+; SSSE3-NEXT: andl $13107, %eax # imm = 0x3333
+; SSSE3-NEXT: addl %ecx, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSSE3-NEXT: shrl $4, %ecx
+; SSSE3-NEXT: addl %eax, %ecx
+; SSSE3-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSSE3-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSSE3-NEXT: movzbl %ah, %eax # NOREX
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSSE3-NEXT: pextrw $6, %xmm0, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: shrl %ecx
+; SSSE3-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSSE3-NEXT: subl %ecx, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSSE3-NEXT: shrl $2, %eax
+; SSSE3-NEXT: andl $13107, %eax # imm = 0x3333
+; SSSE3-NEXT: addl %ecx, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSSE3-NEXT: shrl $4, %ecx
+; SSSE3-NEXT: addl %eax, %ecx
+; SSSE3-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSSE3-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSSE3-NEXT: movzbl %ah, %eax # NOREX
+; SSSE3-NEXT: movd %eax, %xmm3
+; SSSE3-NEXT: pextrw $2, %xmm0, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: shrl %ecx
+; SSSE3-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSSE3-NEXT: subl %ecx, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSSE3-NEXT: shrl $2, %eax
+; SSSE3-NEXT: andl $13107, %eax # imm = 0x3333
+; SSSE3-NEXT: addl %ecx, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSSE3-NEXT: shrl $4, %ecx
+; SSSE3-NEXT: addl %eax, %ecx
+; SSSE3-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSSE3-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSSE3-NEXT: movzbl %ah, %eax # NOREX
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; SSSE3-NEXT: pextrw $4, %xmm0, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: shrl %ecx
+; SSSE3-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSSE3-NEXT: subl %ecx, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSSE3-NEXT: shrl $2, %eax
+; SSSE3-NEXT: andl $13107, %eax # imm = 0x3333
+; SSSE3-NEXT: addl %ecx, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSSE3-NEXT: shrl $4, %ecx
+; SSSE3-NEXT: addl %eax, %ecx
+; SSSE3-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSSE3-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSSE3-NEXT: movzbl %ah, %eax # NOREX
+; SSSE3-NEXT: movd %eax, %xmm3
+; SSSE3-NEXT: movd %xmm0, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: shrl %ecx
+; SSSE3-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSSE3-NEXT: subl %ecx, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSSE3-NEXT: shrl $2, %eax
+; SSSE3-NEXT: andl $13107, %eax # imm = 0x3333
+; SSSE3-NEXT: addl %ecx, %eax
+; SSSE3-NEXT: movl %eax, %ecx
+; SSSE3-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSSE3-NEXT: shrl $4, %ecx
+; SSSE3-NEXT: addl %eax, %ecx
+; SSSE3-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSSE3-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSSE3-NEXT: movzbl %ah, %eax # NOREX
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: testv8i16:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrw $1, %xmm0, %eax
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: shrl %ecx
+; SSE41-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE41-NEXT: subl %ecx, %eax
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE41-NEXT: shrl $2, %eax
+; SSE41-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE41-NEXT: addl %ecx, %eax
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE41-NEXT: shrl $4, %ecx
+; SSE41-NEXT: addl %eax, %ecx
+; SSE41-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE41-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE41-NEXT: movzbl %ah, %eax # NOREX
+; SSE41-NEXT: movd %xmm0, %ecx
+; SSE41-NEXT: movl %ecx, %edx
+; SSE41-NEXT: shrl %edx
+; SSE41-NEXT: andl $21845, %edx # imm = 0x5555
+; SSE41-NEXT: subl %edx, %ecx
+; SSE41-NEXT: movl %ecx, %edx
+; SSE41-NEXT: andl $13107, %edx # imm = 0x3333
+; SSE41-NEXT: shrl $2, %ecx
+; SSE41-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE41-NEXT: addl %edx, %ecx
+; SSE41-NEXT: movl %ecx, %edx
+; SSE41-NEXT: andl $65520, %edx # imm = 0xFFF0
+; SSE41-NEXT: shrl $4, %edx
+; SSE41-NEXT: addl %ecx, %edx
+; SSE41-NEXT: andl $3855, %edx # imm = 0xF0F
+; SSE41-NEXT: imull $257, %edx, %ecx # imm = 0x101
+; SSE41-NEXT: movzbl %ch, %ecx # NOREX
+; SSE41-NEXT: movd %ecx, %xmm1
+; SSE41-NEXT: pinsrw $1, %eax, %xmm1
+; SSE41-NEXT: pextrw $2, %xmm0, %eax
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: shrl %ecx
+; SSE41-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE41-NEXT: subl %ecx, %eax
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE41-NEXT: shrl $2, %eax
+; SSE41-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE41-NEXT: addl %ecx, %eax
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE41-NEXT: shrl $4, %ecx
+; SSE41-NEXT: addl %eax, %ecx
+; SSE41-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE41-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE41-NEXT: movzbl %ah, %eax # NOREX
+; SSE41-NEXT: pinsrw $2, %eax, %xmm1
+; SSE41-NEXT: pextrw $3, %xmm0, %eax
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: shrl %ecx
+; SSE41-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE41-NEXT: subl %ecx, %eax
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE41-NEXT: shrl $2, %eax
+; SSE41-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE41-NEXT: addl %ecx, %eax
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE41-NEXT: shrl $4, %ecx
+; SSE41-NEXT: addl %eax, %ecx
+; SSE41-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE41-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE41-NEXT: movzbl %ah, %eax # NOREX
+; SSE41-NEXT: pinsrw $3, %eax, %xmm1
+; SSE41-NEXT: pextrw $4, %xmm0, %eax
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: shrl %ecx
+; SSE41-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE41-NEXT: subl %ecx, %eax
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE41-NEXT: shrl $2, %eax
+; SSE41-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE41-NEXT: addl %ecx, %eax
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE41-NEXT: shrl $4, %ecx
+; SSE41-NEXT: addl %eax, %ecx
+; SSE41-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE41-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE41-NEXT: movzbl %ah, %eax # NOREX
+; SSE41-NEXT: pinsrw $4, %eax, %xmm1
+; SSE41-NEXT: pextrw $5, %xmm0, %eax
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: shrl %ecx
+; SSE41-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE41-NEXT: subl %ecx, %eax
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE41-NEXT: shrl $2, %eax
+; SSE41-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE41-NEXT: addl %ecx, %eax
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE41-NEXT: shrl $4, %ecx
+; SSE41-NEXT: addl %eax, %ecx
+; SSE41-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE41-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE41-NEXT: movzbl %ah, %eax # NOREX
+; SSE41-NEXT: pinsrw $5, %eax, %xmm1
+; SSE41-NEXT: pextrw $6, %xmm0, %eax
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: shrl %ecx
+; SSE41-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE41-NEXT: subl %ecx, %eax
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE41-NEXT: shrl $2, %eax
+; SSE41-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE41-NEXT: addl %ecx, %eax
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE41-NEXT: shrl $4, %ecx
+; SSE41-NEXT: addl %eax, %ecx
+; SSE41-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE41-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE41-NEXT: movzbl %ah, %eax # NOREX
+; SSE41-NEXT: pinsrw $6, %eax, %xmm1
+; SSE41-NEXT: pextrw $7, %xmm0, %eax
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: shrl %ecx
+; SSE41-NEXT: andl $21845, %ecx # imm = 0x5555
+; SSE41-NEXT: subl %ecx, %eax
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: andl $13107, %ecx # imm = 0x3333
+; SSE41-NEXT: shrl $2, %eax
+; SSE41-NEXT: andl $13107, %eax # imm = 0x3333
+; SSE41-NEXT: addl %ecx, %eax
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; SSE41-NEXT: shrl $4, %ecx
+; SSE41-NEXT: addl %eax, %ecx
+; SSE41-NEXT: andl $3855, %ecx # imm = 0xF0F
+; SSE41-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; SSE41-NEXT: movzbl %ah, %eax # NOREX
+; SSE41-NEXT: pinsrw $7, %eax, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: testv8i16:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrw $1, %xmm0, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: shrl %ecx
+; AVX-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX-NEXT: subl %ecx, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX-NEXT: shrl $2, %eax
+; AVX-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX-NEXT: addl %ecx, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX-NEXT: shrl $4, %ecx
+; AVX-NEXT: addl %eax, %ecx
+; AVX-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX-NEXT: movzbl %ah, %eax # NOREX
+; AVX-NEXT: vmovd %xmm0, %ecx
+; AVX-NEXT: movl %ecx, %edx
+; AVX-NEXT: shrl %edx
+; AVX-NEXT: andl $21845, %edx # imm = 0x5555
+; AVX-NEXT: subl %edx, %ecx
+; AVX-NEXT: movl %ecx, %edx
+; AVX-NEXT: andl $13107, %edx # imm = 0x3333
+; AVX-NEXT: shrl $2, %ecx
+; AVX-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX-NEXT: addl %edx, %ecx
+; AVX-NEXT: movl %ecx, %edx
+; AVX-NEXT: andl $65520, %edx # imm = 0xFFF0
+; AVX-NEXT: shrl $4, %edx
+; AVX-NEXT: addl %ecx, %edx
+; AVX-NEXT: andl $3855, %edx # imm = 0xF0F
+; AVX-NEXT: imull $257, %edx, %ecx # imm = 0x101
+; AVX-NEXT: movzbl %ch, %ecx # NOREX
+; AVX-NEXT: vmovd %ecx, %xmm1
+; AVX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $2, %xmm0, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: shrl %ecx
+; AVX-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX-NEXT: subl %ecx, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX-NEXT: shrl $2, %eax
+; AVX-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX-NEXT: addl %ecx, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX-NEXT: shrl $4, %ecx
+; AVX-NEXT: addl %eax, %ecx
+; AVX-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX-NEXT: movzbl %ah, %eax # NOREX
+; AVX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $3, %xmm0, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: shrl %ecx
+; AVX-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX-NEXT: subl %ecx, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX-NEXT: shrl $2, %eax
+; AVX-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX-NEXT: addl %ecx, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX-NEXT: shrl $4, %ecx
+; AVX-NEXT: addl %eax, %ecx
+; AVX-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX-NEXT: movzbl %ah, %eax # NOREX
+; AVX-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $4, %xmm0, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: shrl %ecx
+; AVX-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX-NEXT: subl %ecx, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX-NEXT: shrl $2, %eax
+; AVX-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX-NEXT: addl %ecx, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX-NEXT: shrl $4, %ecx
+; AVX-NEXT: addl %eax, %ecx
+; AVX-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX-NEXT: movzbl %ah, %eax # NOREX
+; AVX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $5, %xmm0, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: shrl %ecx
+; AVX-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX-NEXT: subl %ecx, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX-NEXT: shrl $2, %eax
+; AVX-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX-NEXT: addl %ecx, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX-NEXT: shrl $4, %ecx
+; AVX-NEXT: addl %eax, %ecx
+; AVX-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX-NEXT: movzbl %ah, %eax # NOREX
+; AVX-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $6, %xmm0, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: shrl %ecx
+; AVX-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX-NEXT: subl %ecx, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX-NEXT: shrl $2, %eax
+; AVX-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX-NEXT: addl %ecx, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX-NEXT: shrl $4, %ecx
+; AVX-NEXT: addl %eax, %ecx
+; AVX-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX-NEXT: movzbl %ah, %eax # NOREX
+; AVX-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $7, %xmm0, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: shrl %ecx
+; AVX-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX-NEXT: subl %ecx, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX-NEXT: shrl $2, %eax
+; AVX-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX-NEXT: addl %ecx, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX-NEXT: shrl $4, %ecx
+; AVX-NEXT: addl %eax, %ecx
+; AVX-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX-NEXT: movzbl %ah, %eax # NOREX
+; AVX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %out = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %in)
+ ret <8 x i16> %out
+}
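+
+; At every feature level shown here, testv8i16 is scalarized: each lane is
+; extracted (movd/pextrw), run through the same 0x5555/0x3333/0x0F0F
+; mask-and-add sequence in scalar registers, and the per-byte counts are then
+; summed by multiplying by 0x101 (imull $257) and reading the high byte with
+; movzbl %ah. The lanes are reassembled with punpcklwd on SSE2/SSE3/SSSE3 and
+; with pinsrw/vpinsrw once SSE4.1/AVX are available.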
+
+define <16 x i8> @testv16i8(<16 x i8> %in) {
+; SSE2-LABEL: testv16i8:
+; SSE2: # BB#0:
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: .Ltmp0:
+; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: .Ltmp1:
+; SSE2-NEXT: .cfi_def_cfa_offset 24
+; SSE2-NEXT: .Ltmp2:
+; SSE2-NEXT: .cfi_offset %rbx, -24
+; SSE2-NEXT: .Ltmp3:
+; SSE2-NEXT: .cfi_offset %rbp, -16
+; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb %al, %cl
+; SSE2-NEXT: shrb %cl
+; SSE2-NEXT: andb $85, %cl
+; SSE2-NEXT: subb %cl, %al
+; SSE2-NEXT: movb %al, %cl
+; SSE2-NEXT: andb $51, %cl
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: andb $51, %al
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: movb %al, %cl
+; SSE2-NEXT: shrb $4, %cl
+; SSE2-NEXT: addb %al, %cl
+; SSE2-NEXT: andb $15, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %r10b
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %dil
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %dl
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %r9b
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %bpl
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %sil
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %bl
+; SSE2-NEXT: movb %bl, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: andb $85, %al
+; SSE2-NEXT: subb %al, %bl
+; SSE2-NEXT: movb %bl, %al
+; SSE2-NEXT: andb $51, %al
+; SSE2-NEXT: shrb $2, %bl
+; SSE2-NEXT: andb $51, %bl
+; SSE2-NEXT: addb %al, %bl
+; SSE2-NEXT: movb %bl, %al
+; SSE2-NEXT: shrb $4, %al
+; SSE2-NEXT: addb %bl, %al
+; SSE2-NEXT: andb $15, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movb %dl, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: andb $85, %al
+; SSE2-NEXT: subb %al, %dl
+; SSE2-NEXT: movb %dl, %al
+; SSE2-NEXT: andb $51, %al
+; SSE2-NEXT: shrb $2, %dl
+; SSE2-NEXT: andb $51, %dl
+; SSE2-NEXT: addb %al, %dl
+; SSE2-NEXT: movb %dl, %al
+; SSE2-NEXT: shrb $4, %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: andb $15, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %r11b
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %dl
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %r8b
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb %al, %bl
+; SSE2-NEXT: shrb %bl
+; SSE2-NEXT: andb $85, %bl
+; SSE2-NEXT: subb %bl, %al
+; SSE2-NEXT: movb %al, %bl
+; SSE2-NEXT: andb $51, %bl
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: andb $51, %al
+; SSE2-NEXT: addb %bl, %al
+; SSE2-NEXT: movb %al, %bl
+; SSE2-NEXT: shrb $4, %bl
+; SSE2-NEXT: addb %al, %bl
+; SSE2-NEXT: andb $15, %bl
+; SSE2-NEXT: movzbl %bl, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: movb %cl, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: andb $85, %al
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movb %cl, %al
+; SSE2-NEXT: andb $51, %al
+; SSE2-NEXT: shrb $2, %cl
+; SSE2-NEXT: andb $51, %cl
+; SSE2-NEXT: addb %al, %cl
+; SSE2-NEXT: movb %cl, %al
+; SSE2-NEXT: shrb $4, %al
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: andb $15, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: movb %dl, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: andb $85, %al
+; SSE2-NEXT: subb %al, %dl
+; SSE2-NEXT: movb %dl, %al
+; SSE2-NEXT: andb $51, %al
+; SSE2-NEXT: shrb $2, %dl
+; SSE2-NEXT: andb $51, %dl
+; SSE2-NEXT: addb %al, %dl
+; SSE2-NEXT: movb %dl, %al
+; SSE2-NEXT: shrb $4, %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: andb $15, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-NEXT: movb %bpl, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: andb $85, %al
+; SSE2-NEXT: subb %al, %bpl
+; SSE2-NEXT: movb %bpl, %al
+; SSE2-NEXT: andb $51, %al
+; SSE2-NEXT: shrb $2, %bpl
+; SSE2-NEXT: andb $51, %bpl
+; SSE2-NEXT: addb %al, %bpl
+; SSE2-NEXT: movb %bpl, %al
+; SSE2-NEXT: shrb $4, %al
+; SSE2-NEXT: addb %bpl, %al
+; SSE2-NEXT: andb $15, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: movb %cl, %dl
+; SSE2-NEXT: shrb %dl
+; SSE2-NEXT: andb $85, %dl
+; SSE2-NEXT: subb %dl, %cl
+; SSE2-NEXT: movb %cl, %dl
+; SSE2-NEXT: andb $51, %dl
+; SSE2-NEXT: shrb $2, %cl
+; SSE2-NEXT: andb $51, %cl
+; SSE2-NEXT: addb %dl, %cl
+; SSE2-NEXT: movb %cl, %dl
+; SSE2-NEXT: shrb $4, %dl
+; SSE2-NEXT: addb %cl, %dl
+; SSE2-NEXT: andb $15, %dl
+; SSE2-NEXT: movzbl %dl, %ecx
+; SSE2-NEXT: movd %ecx, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movb %r10b, %cl
+; SSE2-NEXT: shrb %cl
+; SSE2-NEXT: andb $85, %cl
+; SSE2-NEXT: subb %cl, %r10b
+; SSE2-NEXT: movb %r10b, %cl
+; SSE2-NEXT: andb $51, %cl
+; SSE2-NEXT: shrb $2, %r10b
+; SSE2-NEXT: andb $51, %r10b
+; SSE2-NEXT: addb %cl, %r10b
+; SSE2-NEXT: movb %r10b, %cl
+; SSE2-NEXT: shrb $4, %cl
+; SSE2-NEXT: addb %r10b, %cl
+; SSE2-NEXT: andb $15, %cl
+; SSE2-NEXT: movzbl %cl, %ecx
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: movb %r11b, %cl
+; SSE2-NEXT: shrb %cl
+; SSE2-NEXT: andb $85, %cl
+; SSE2-NEXT: subb %cl, %r11b
+; SSE2-NEXT: movb %r11b, %cl
+; SSE2-NEXT: andb $51, %cl
+; SSE2-NEXT: shrb $2, %r11b
+; SSE2-NEXT: andb $51, %r11b
+; SSE2-NEXT: addb %cl, %r11b
+; SSE2-NEXT: movb %r11b, %cl
+; SSE2-NEXT: shrb $4, %cl
+; SSE2-NEXT: addb %r11b, %cl
+; SSE2-NEXT: andb $15, %cl
+; SSE2-NEXT: movzbl %cl, %ecx
+; SSE2-NEXT: movd %ecx, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: movb %r9b, %cl
+; SSE2-NEXT: shrb %cl
+; SSE2-NEXT: andb $85, %cl
+; SSE2-NEXT: subb %cl, %r9b
+; SSE2-NEXT: movb %r9b, %cl
+; SSE2-NEXT: andb $51, %cl
+; SSE2-NEXT: shrb $2, %r9b
+; SSE2-NEXT: andb $51, %r9b
+; SSE2-NEXT: addb %cl, %r9b
+; SSE2-NEXT: movb %r9b, %cl
+; SSE2-NEXT: shrb $4, %cl
+; SSE2-NEXT: addb %r9b, %cl
+; SSE2-NEXT: andb $15, %cl
+; SSE2-NEXT: movzbl %cl, %ecx
+; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: movb %al, %cl
+; SSE2-NEXT: shrb %cl
+; SSE2-NEXT: andb $85, %cl
+; SSE2-NEXT: subb %cl, %al
+; SSE2-NEXT: movb %al, %cl
+; SSE2-NEXT: andb $51, %cl
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: andb $51, %al
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: movb %al, %cl
+; SSE2-NEXT: shrb $4, %cl
+; SSE2-NEXT: addb %al, %cl
+; SSE2-NEXT: andb $15, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: movb %dil, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: andb $85, %al
+; SSE2-NEXT: subb %al, %dil
+; SSE2-NEXT: movb %dil, %al
+; SSE2-NEXT: andb $51, %al
+; SSE2-NEXT: shrb $2, %dil
+; SSE2-NEXT: andb $51, %dil
+; SSE2-NEXT: addb %al, %dil
+; SSE2-NEXT: movb %dil, %al
+; SSE2-NEXT: shrb $4, %al
+; SSE2-NEXT: addb %dil, %al
+; SSE2-NEXT: andb $15, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: movb %r8b, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: andb $85, %al
+; SSE2-NEXT: subb %al, %r8b
+; SSE2-NEXT: movb %r8b, %al
+; SSE2-NEXT: andb $51, %al
+; SSE2-NEXT: shrb $2, %r8b
+; SSE2-NEXT: andb $51, %r8b
+; SSE2-NEXT: addb %al, %r8b
+; SSE2-NEXT: movb %r8b, %al
+; SSE2-NEXT: shrb $4, %al
+; SSE2-NEXT: addb %r8b, %al
+; SSE2-NEXT: andb $15, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: movb %sil, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: andb $85, %al
+; SSE2-NEXT: subb %al, %sil
+; SSE2-NEXT: movb %sil, %al
+; SSE2-NEXT: andb $51, %al
+; SSE2-NEXT: shrb $2, %sil
+; SSE2-NEXT: andb $51, %sil
+; SSE2-NEXT: addb %al, %sil
+; SSE2-NEXT: movb %sil, %al
+; SSE2-NEXT: shrb $4, %al
+; SSE2-NEXT: addb %sil, %al
+; SSE2-NEXT: andb $15, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm4
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb %al, %cl
+; SSE2-NEXT: shrb %cl
+; SSE2-NEXT: andb $85, %cl
+; SSE2-NEXT: subb %cl, %al
+; SSE2-NEXT: movb %al, %cl
+; SSE2-NEXT: andb $51, %cl
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: andb $51, %al
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: movb %al, %cl
+; SSE2-NEXT: shrb $4, %cl
+; SSE2-NEXT: addb %al, %cl
+; SSE2-NEXT: andb $15, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: testv16i8:
+; SSE3: # BB#0:
+; SSE3-NEXT: pushq %rbp
+; SSE3-NEXT: .Ltmp0:
+; SSE3-NEXT: .cfi_def_cfa_offset 16
+; SSE3-NEXT: pushq %rbx
+; SSE3-NEXT: .Ltmp1:
+; SSE3-NEXT: .cfi_def_cfa_offset 24
+; SSE3-NEXT: .Ltmp2:
+; SSE3-NEXT: .cfi_offset %rbx, -24
+; SSE3-NEXT: .Ltmp3:
+; SSE3-NEXT: .cfi_offset %rbp, -16
+; SSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE3-NEXT: movb %al, %cl
+; SSE3-NEXT: shrb %cl
+; SSE3-NEXT: andb $85, %cl
+; SSE3-NEXT: subb %cl, %al
+; SSE3-NEXT: movb %al, %cl
+; SSE3-NEXT: andb $51, %cl
+; SSE3-NEXT: shrb $2, %al
+; SSE3-NEXT: andb $51, %al
+; SSE3-NEXT: addb %cl, %al
+; SSE3-NEXT: movb %al, %cl
+; SSE3-NEXT: shrb $4, %cl
+; SSE3-NEXT: addb %al, %cl
+; SSE3-NEXT: andb $15, %cl
+; SSE3-NEXT: movzbl %cl, %eax
+; SSE3-NEXT: movd %eax, %xmm0
+; SSE3-NEXT: movb -{{[0-9]+}}(%rsp), %r10b
+; SSE3-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE3-NEXT: movb -{{[0-9]+}}(%rsp), %dil
+; SSE3-NEXT: movb -{{[0-9]+}}(%rsp), %dl
+; SSE3-NEXT: movb -{{[0-9]+}}(%rsp), %r9b
+; SSE3-NEXT: movb -{{[0-9]+}}(%rsp), %bpl
+; SSE3-NEXT: movb -{{[0-9]+}}(%rsp), %sil
+; SSE3-NEXT: movb -{{[0-9]+}}(%rsp), %bl
+; SSE3-NEXT: movb %bl, %al
+; SSE3-NEXT: shrb %al
+; SSE3-NEXT: andb $85, %al
+; SSE3-NEXT: subb %al, %bl
+; SSE3-NEXT: movb %bl, %al
+; SSE3-NEXT: andb $51, %al
+; SSE3-NEXT: shrb $2, %bl
+; SSE3-NEXT: andb $51, %bl
+; SSE3-NEXT: addb %al, %bl
+; SSE3-NEXT: movb %bl, %al
+; SSE3-NEXT: shrb $4, %al
+; SSE3-NEXT: addb %bl, %al
+; SSE3-NEXT: andb $15, %al
+; SSE3-NEXT: movzbl %al, %eax
+; SSE3-NEXT: movd %eax, %xmm1
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE3-NEXT: movb %dl, %al
+; SSE3-NEXT: shrb %al
+; SSE3-NEXT: andb $85, %al
+; SSE3-NEXT: subb %al, %dl
+; SSE3-NEXT: movb %dl, %al
+; SSE3-NEXT: andb $51, %al
+; SSE3-NEXT: shrb $2, %dl
+; SSE3-NEXT: andb $51, %dl
+; SSE3-NEXT: addb %al, %dl
+; SSE3-NEXT: movb %dl, %al
+; SSE3-NEXT: shrb $4, %al
+; SSE3-NEXT: addb %dl, %al
+; SSE3-NEXT: andb $15, %al
+; SSE3-NEXT: movzbl %al, %eax
+; SSE3-NEXT: movd %eax, %xmm2
+; SSE3-NEXT: movb -{{[0-9]+}}(%rsp), %r11b
+; SSE3-NEXT: movb -{{[0-9]+}}(%rsp), %dl
+; SSE3-NEXT: movb -{{[0-9]+}}(%rsp), %r8b
+; SSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE3-NEXT: movb %al, %bl
+; SSE3-NEXT: shrb %bl
+; SSE3-NEXT: andb $85, %bl
+; SSE3-NEXT: subb %bl, %al
+; SSE3-NEXT: movb %al, %bl
+; SSE3-NEXT: andb $51, %bl
+; SSE3-NEXT: shrb $2, %al
+; SSE3-NEXT: andb $51, %al
+; SSE3-NEXT: addb %bl, %al
+; SSE3-NEXT: movb %al, %bl
+; SSE3-NEXT: shrb $4, %bl
+; SSE3-NEXT: addb %al, %bl
+; SSE3-NEXT: andb $15, %bl
+; SSE3-NEXT: movzbl %bl, %eax
+; SSE3-NEXT: movd %eax, %xmm0
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE3-NEXT: movb %cl, %al
+; SSE3-NEXT: shrb %al
+; SSE3-NEXT: andb $85, %al
+; SSE3-NEXT: subb %al, %cl
+; SSE3-NEXT: movb %cl, %al
+; SSE3-NEXT: andb $51, %al
+; SSE3-NEXT: shrb $2, %cl
+; SSE3-NEXT: andb $51, %cl
+; SSE3-NEXT: addb %al, %cl
+; SSE3-NEXT: movb %cl, %al
+; SSE3-NEXT: shrb $4, %al
+; SSE3-NEXT: addb %cl, %al
+; SSE3-NEXT: andb $15, %al
+; SSE3-NEXT: movzbl %al, %eax
+; SSE3-NEXT: movd %eax, %xmm1
+; SSE3-NEXT: movb %dl, %al
+; SSE3-NEXT: shrb %al
+; SSE3-NEXT: andb $85, %al
+; SSE3-NEXT: subb %al, %dl
+; SSE3-NEXT: movb %dl, %al
+; SSE3-NEXT: andb $51, %al
+; SSE3-NEXT: shrb $2, %dl
+; SSE3-NEXT: andb $51, %dl
+; SSE3-NEXT: addb %al, %dl
+; SSE3-NEXT: movb %dl, %al
+; SSE3-NEXT: shrb $4, %al
+; SSE3-NEXT: addb %dl, %al
+; SSE3-NEXT: andb $15, %al
+; SSE3-NEXT: movzbl %al, %eax
+; SSE3-NEXT: movd %eax, %xmm2
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE3-NEXT: movb %bpl, %al
+; SSE3-NEXT: shrb %al
+; SSE3-NEXT: andb $85, %al
+; SSE3-NEXT: subb %al, %bpl
+; SSE3-NEXT: movb %bpl, %al
+; SSE3-NEXT: andb $51, %al
+; SSE3-NEXT: shrb $2, %bpl
+; SSE3-NEXT: andb $51, %bpl
+; SSE3-NEXT: addb %al, %bpl
+; SSE3-NEXT: movb %bpl, %al
+; SSE3-NEXT: shrb $4, %al
+; SSE3-NEXT: addb %bpl, %al
+; SSE3-NEXT: andb $15, %al
+; SSE3-NEXT: movzbl %al, %eax
+; SSE3-NEXT: movd %eax, %xmm3
+; SSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE3-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE3-NEXT: movb %cl, %dl
+; SSE3-NEXT: shrb %dl
+; SSE3-NEXT: andb $85, %dl
+; SSE3-NEXT: subb %dl, %cl
+; SSE3-NEXT: movb %cl, %dl
+; SSE3-NEXT: andb $51, %dl
+; SSE3-NEXT: shrb $2, %cl
+; SSE3-NEXT: andb $51, %cl
+; SSE3-NEXT: addb %dl, %cl
+; SSE3-NEXT: movb %cl, %dl
+; SSE3-NEXT: shrb $4, %dl
+; SSE3-NEXT: addb %cl, %dl
+; SSE3-NEXT: andb $15, %dl
+; SSE3-NEXT: movzbl %dl, %ecx
+; SSE3-NEXT: movd %ecx, %xmm1
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE3-NEXT: movb %r10b, %cl
+; SSE3-NEXT: shrb %cl
+; SSE3-NEXT: andb $85, %cl
+; SSE3-NEXT: subb %cl, %r10b
+; SSE3-NEXT: movb %r10b, %cl
+; SSE3-NEXT: andb $51, %cl
+; SSE3-NEXT: shrb $2, %r10b
+; SSE3-NEXT: andb $51, %r10b
+; SSE3-NEXT: addb %cl, %r10b
+; SSE3-NEXT: movb %r10b, %cl
+; SSE3-NEXT: shrb $4, %cl
+; SSE3-NEXT: addb %r10b, %cl
+; SSE3-NEXT: andb $15, %cl
+; SSE3-NEXT: movzbl %cl, %ecx
+; SSE3-NEXT: movd %ecx, %xmm2
+; SSE3-NEXT: movb %r11b, %cl
+; SSE3-NEXT: shrb %cl
+; SSE3-NEXT: andb $85, %cl
+; SSE3-NEXT: subb %cl, %r11b
+; SSE3-NEXT: movb %r11b, %cl
+; SSE3-NEXT: andb $51, %cl
+; SSE3-NEXT: shrb $2, %r11b
+; SSE3-NEXT: andb $51, %r11b
+; SSE3-NEXT: addb %cl, %r11b
+; SSE3-NEXT: movb %r11b, %cl
+; SSE3-NEXT: shrb $4, %cl
+; SSE3-NEXT: addb %r11b, %cl
+; SSE3-NEXT: andb $15, %cl
+; SSE3-NEXT: movzbl %cl, %ecx
+; SSE3-NEXT: movd %ecx, %xmm0
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE3-NEXT: movb %r9b, %cl
+; SSE3-NEXT: shrb %cl
+; SSE3-NEXT: andb $85, %cl
+; SSE3-NEXT: subb %cl, %r9b
+; SSE3-NEXT: movb %r9b, %cl
+; SSE3-NEXT: andb $51, %cl
+; SSE3-NEXT: shrb $2, %r9b
+; SSE3-NEXT: andb $51, %r9b
+; SSE3-NEXT: addb %cl, %r9b
+; SSE3-NEXT: movb %r9b, %cl
+; SSE3-NEXT: shrb $4, %cl
+; SSE3-NEXT: addb %r9b, %cl
+; SSE3-NEXT: andb $15, %cl
+; SSE3-NEXT: movzbl %cl, %ecx
+; SSE3-NEXT: movd %ecx, %xmm3
+; SSE3-NEXT: movb %al, %cl
+; SSE3-NEXT: shrb %cl
+; SSE3-NEXT: andb $85, %cl
+; SSE3-NEXT: subb %cl, %al
+; SSE3-NEXT: movb %al, %cl
+; SSE3-NEXT: andb $51, %cl
+; SSE3-NEXT: shrb $2, %al
+; SSE3-NEXT: andb $51, %al
+; SSE3-NEXT: addb %cl, %al
+; SSE3-NEXT: movb %al, %cl
+; SSE3-NEXT: shrb $4, %cl
+; SSE3-NEXT: addb %al, %cl
+; SSE3-NEXT: andb $15, %cl
+; SSE3-NEXT: movzbl %cl, %eax
+; SSE3-NEXT: movd %eax, %xmm2
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE3-NEXT: movb %dil, %al
+; SSE3-NEXT: shrb %al
+; SSE3-NEXT: andb $85, %al
+; SSE3-NEXT: subb %al, %dil
+; SSE3-NEXT: movb %dil, %al
+; SSE3-NEXT: andb $51, %al
+; SSE3-NEXT: shrb $2, %dil
+; SSE3-NEXT: andb $51, %dil
+; SSE3-NEXT: addb %al, %dil
+; SSE3-NEXT: movb %dil, %al
+; SSE3-NEXT: shrb $4, %al
+; SSE3-NEXT: addb %dil, %al
+; SSE3-NEXT: andb $15, %al
+; SSE3-NEXT: movzbl %al, %eax
+; SSE3-NEXT: movd %eax, %xmm0
+; SSE3-NEXT: movb %r8b, %al
+; SSE3-NEXT: shrb %al
+; SSE3-NEXT: andb $85, %al
+; SSE3-NEXT: subb %al, %r8b
+; SSE3-NEXT: movb %r8b, %al
+; SSE3-NEXT: andb $51, %al
+; SSE3-NEXT: shrb $2, %r8b
+; SSE3-NEXT: andb $51, %r8b
+; SSE3-NEXT: addb %al, %r8b
+; SSE3-NEXT: movb %r8b, %al
+; SSE3-NEXT: shrb $4, %al
+; SSE3-NEXT: addb %r8b, %al
+; SSE3-NEXT: andb $15, %al
+; SSE3-NEXT: movzbl %al, %eax
+; SSE3-NEXT: movd %eax, %xmm3
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE3-NEXT: movb %sil, %al
+; SSE3-NEXT: shrb %al
+; SSE3-NEXT: andb $85, %al
+; SSE3-NEXT: subb %al, %sil
+; SSE3-NEXT: movb %sil, %al
+; SSE3-NEXT: andb $51, %al
+; SSE3-NEXT: shrb $2, %sil
+; SSE3-NEXT: andb $51, %sil
+; SSE3-NEXT: addb %al, %sil
+; SSE3-NEXT: movb %sil, %al
+; SSE3-NEXT: shrb $4, %al
+; SSE3-NEXT: addb %sil, %al
+; SSE3-NEXT: andb $15, %al
+; SSE3-NEXT: movzbl %al, %eax
+; SSE3-NEXT: movd %eax, %xmm4
+; SSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE3-NEXT: movb %al, %cl
+; SSE3-NEXT: shrb %cl
+; SSE3-NEXT: andb $85, %cl
+; SSE3-NEXT: subb %cl, %al
+; SSE3-NEXT: movb %al, %cl
+; SSE3-NEXT: andb $51, %cl
+; SSE3-NEXT: shrb $2, %al
+; SSE3-NEXT: andb $51, %al
+; SSE3-NEXT: addb %cl, %al
+; SSE3-NEXT: movb %al, %cl
+; SSE3-NEXT: shrb $4, %cl
+; SSE3-NEXT: addb %al, %cl
+; SSE3-NEXT: andb $15, %cl
+; SSE3-NEXT: movzbl %cl, %eax
+; SSE3-NEXT: movd %eax, %xmm0
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE3-NEXT: popq %rbx
+; SSE3-NEXT: popq %rbp
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: testv16i8:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: pushq %rbp
+; SSSE3-NEXT: .Ltmp0:
+; SSSE3-NEXT: .cfi_def_cfa_offset 16
+; SSSE3-NEXT: pushq %rbx
+; SSSE3-NEXT: .Ltmp1:
+; SSSE3-NEXT: .cfi_def_cfa_offset 24
+; SSSE3-NEXT: .Ltmp2:
+; SSSE3-NEXT: .cfi_offset %rbx, -24
+; SSSE3-NEXT: .Ltmp3:
+; SSSE3-NEXT: .cfi_offset %rbp, -16
+; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT: movb %al, %cl
+; SSSE3-NEXT: shrb %cl
+; SSSE3-NEXT: andb $85, %cl
+; SSSE3-NEXT: subb %cl, %al
+; SSSE3-NEXT: movb %al, %cl
+; SSSE3-NEXT: andb $51, %cl
+; SSSE3-NEXT: shrb $2, %al
+; SSSE3-NEXT: andb $51, %al
+; SSSE3-NEXT: addb %cl, %al
+; SSSE3-NEXT: movb %al, %cl
+; SSSE3-NEXT: shrb $4, %cl
+; SSSE3-NEXT: addb %al, %cl
+; SSSE3-NEXT: andb $15, %cl
+; SSSE3-NEXT: movzbl %cl, %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %r10b
+; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %dil
+; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %dl
+; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %r9b
+; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %bpl
+; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %sil
+; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %bl
+; SSSE3-NEXT: movb %bl, %al
+; SSSE3-NEXT: shrb %al
+; SSSE3-NEXT: andb $85, %al
+; SSSE3-NEXT: subb %al, %bl
+; SSSE3-NEXT: movb %bl, %al
+; SSSE3-NEXT: andb $51, %al
+; SSSE3-NEXT: shrb $2, %bl
+; SSSE3-NEXT: andb $51, %bl
+; SSSE3-NEXT: addb %al, %bl
+; SSSE3-NEXT: movb %bl, %al
+; SSSE3-NEXT: shrb $4, %al
+; SSSE3-NEXT: addb %bl, %al
+; SSSE3-NEXT: andb $15, %al
+; SSSE3-NEXT: movzbl %al, %eax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSSE3-NEXT: movb %dl, %al
+; SSSE3-NEXT: shrb %al
+; SSSE3-NEXT: andb $85, %al
+; SSSE3-NEXT: subb %al, %dl
+; SSSE3-NEXT: movb %dl, %al
+; SSSE3-NEXT: andb $51, %al
+; SSSE3-NEXT: shrb $2, %dl
+; SSSE3-NEXT: andb $51, %dl
+; SSSE3-NEXT: addb %al, %dl
+; SSSE3-NEXT: movb %dl, %al
+; SSSE3-NEXT: shrb $4, %al
+; SSSE3-NEXT: addb %dl, %al
+; SSSE3-NEXT: andb $15, %al
+; SSSE3-NEXT: movzbl %al, %eax
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %r11b
+; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %dl
+; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %r8b
+; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT: movb %al, %bl
+; SSSE3-NEXT: shrb %bl
+; SSSE3-NEXT: andb $85, %bl
+; SSSE3-NEXT: subb %bl, %al
+; SSSE3-NEXT: movb %al, %bl
+; SSSE3-NEXT: andb $51, %bl
+; SSSE3-NEXT: shrb $2, %al
+; SSSE3-NEXT: andb $51, %al
+; SSSE3-NEXT: addb %bl, %al
+; SSSE3-NEXT: movb %al, %bl
+; SSSE3-NEXT: shrb $4, %bl
+; SSSE3-NEXT: addb %al, %bl
+; SSSE3-NEXT: andb $15, %bl
+; SSSE3-NEXT: movzbl %bl, %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: movb %cl, %al
+; SSSE3-NEXT: shrb %al
+; SSSE3-NEXT: andb $85, %al
+; SSSE3-NEXT: subb %al, %cl
+; SSSE3-NEXT: movb %cl, %al
+; SSSE3-NEXT: andb $51, %al
+; SSSE3-NEXT: shrb $2, %cl
+; SSSE3-NEXT: andb $51, %cl
+; SSSE3-NEXT: addb %al, %cl
+; SSSE3-NEXT: movb %cl, %al
+; SSSE3-NEXT: shrb $4, %al
+; SSSE3-NEXT: addb %cl, %al
+; SSSE3-NEXT: andb $15, %al
+; SSSE3-NEXT: movzbl %al, %eax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: movb %dl, %al
+; SSSE3-NEXT: shrb %al
+; SSSE3-NEXT: andb $85, %al
+; SSSE3-NEXT: subb %al, %dl
+; SSSE3-NEXT: movb %dl, %al
+; SSSE3-NEXT: andb $51, %al
+; SSSE3-NEXT: shrb $2, %dl
+; SSSE3-NEXT: andb $51, %dl
+; SSSE3-NEXT: addb %al, %dl
+; SSSE3-NEXT: movb %dl, %al
+; SSSE3-NEXT: shrb $4, %al
+; SSSE3-NEXT: addb %dl, %al
+; SSSE3-NEXT: andb $15, %al
+; SSSE3-NEXT: movzbl %al, %eax
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSSE3-NEXT: movb %bpl, %al
+; SSSE3-NEXT: shrb %al
+; SSSE3-NEXT: andb $85, %al
+; SSSE3-NEXT: subb %al, %bpl
+; SSSE3-NEXT: movb %bpl, %al
+; SSSE3-NEXT: andb $51, %al
+; SSSE3-NEXT: shrb $2, %bpl
+; SSSE3-NEXT: andb $51, %bpl
+; SSSE3-NEXT: addb %al, %bpl
+; SSSE3-NEXT: movb %bpl, %al
+; SSSE3-NEXT: shrb $4, %al
+; SSSE3-NEXT: addb %bpl, %al
+; SSSE3-NEXT: andb $15, %al
+; SSSE3-NEXT: movzbl %al, %eax
+; SSSE3-NEXT: movd %eax, %xmm3
+; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSSE3-NEXT: movb %cl, %dl
+; SSSE3-NEXT: shrb %dl
+; SSSE3-NEXT: andb $85, %dl
+; SSSE3-NEXT: subb %dl, %cl
+; SSSE3-NEXT: movb %cl, %dl
+; SSSE3-NEXT: andb $51, %dl
+; SSSE3-NEXT: shrb $2, %cl
+; SSSE3-NEXT: andb $51, %cl
+; SSSE3-NEXT: addb %dl, %cl
+; SSSE3-NEXT: movb %cl, %dl
+; SSSE3-NEXT: shrb $4, %dl
+; SSSE3-NEXT: addb %cl, %dl
+; SSSE3-NEXT: andb $15, %dl
+; SSSE3-NEXT: movzbl %dl, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm1
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSSE3-NEXT: movb %r10b, %cl
+; SSSE3-NEXT: shrb %cl
+; SSSE3-NEXT: andb $85, %cl
+; SSSE3-NEXT: subb %cl, %r10b
+; SSSE3-NEXT: movb %r10b, %cl
+; SSSE3-NEXT: andb $51, %cl
+; SSSE3-NEXT: shrb $2, %r10b
+; SSSE3-NEXT: andb $51, %r10b
+; SSSE3-NEXT: addb %cl, %r10b
+; SSSE3-NEXT: movb %r10b, %cl
+; SSSE3-NEXT: shrb $4, %cl
+; SSSE3-NEXT: addb %r10b, %cl
+; SSSE3-NEXT: andb $15, %cl
+; SSSE3-NEXT: movzbl %cl, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm2
+; SSSE3-NEXT: movb %r11b, %cl
+; SSSE3-NEXT: shrb %cl
+; SSSE3-NEXT: andb $85, %cl
+; SSSE3-NEXT: subb %cl, %r11b
+; SSSE3-NEXT: movb %r11b, %cl
+; SSSE3-NEXT: andb $51, %cl
+; SSSE3-NEXT: shrb $2, %r11b
+; SSSE3-NEXT: andb $51, %r11b
+; SSSE3-NEXT: addb %cl, %r11b
+; SSSE3-NEXT: movb %r11b, %cl
+; SSSE3-NEXT: shrb $4, %cl
+; SSSE3-NEXT: addb %r11b, %cl
+; SSSE3-NEXT: andb $15, %cl
+; SSSE3-NEXT: movzbl %cl, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm0
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSSE3-NEXT: movb %r9b, %cl
+; SSSE3-NEXT: shrb %cl
+; SSSE3-NEXT: andb $85, %cl
+; SSSE3-NEXT: subb %cl, %r9b
+; SSSE3-NEXT: movb %r9b, %cl
+; SSSE3-NEXT: andb $51, %cl
+; SSSE3-NEXT: shrb $2, %r9b
+; SSSE3-NEXT: andb $51, %r9b
+; SSSE3-NEXT: addb %cl, %r9b
+; SSSE3-NEXT: movb %r9b, %cl
+; SSSE3-NEXT: shrb $4, %cl
+; SSSE3-NEXT: addb %r9b, %cl
+; SSSE3-NEXT: andb $15, %cl
+; SSSE3-NEXT: movzbl %cl, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm3
+; SSSE3-NEXT: movb %al, %cl
+; SSSE3-NEXT: shrb %cl
+; SSSE3-NEXT: andb $85, %cl
+; SSSE3-NEXT: subb %cl, %al
+; SSSE3-NEXT: movb %al, %cl
+; SSSE3-NEXT: andb $51, %cl
+; SSSE3-NEXT: shrb $2, %al
+; SSSE3-NEXT: andb $51, %al
+; SSSE3-NEXT: addb %cl, %al
+; SSSE3-NEXT: movb %al, %cl
+; SSSE3-NEXT: shrb $4, %cl
+; SSSE3-NEXT: addb %al, %cl
+; SSSE3-NEXT: andb $15, %cl
+; SSSE3-NEXT: movzbl %cl, %eax
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSSE3-NEXT: movb %dil, %al
+; SSSE3-NEXT: shrb %al
+; SSSE3-NEXT: andb $85, %al
+; SSSE3-NEXT: subb %al, %dil
+; SSSE3-NEXT: movb %dil, %al
+; SSSE3-NEXT: andb $51, %al
+; SSSE3-NEXT: shrb $2, %dil
+; SSSE3-NEXT: andb $51, %dil
+; SSSE3-NEXT: addb %al, %dil
+; SSSE3-NEXT: movb %dil, %al
+; SSSE3-NEXT: shrb $4, %al
+; SSSE3-NEXT: addb %dil, %al
+; SSSE3-NEXT: andb $15, %al
+; SSSE3-NEXT: movzbl %al, %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: movb %r8b, %al
+; SSSE3-NEXT: shrb %al
+; SSSE3-NEXT: andb $85, %al
+; SSSE3-NEXT: subb %al, %r8b
+; SSSE3-NEXT: movb %r8b, %al
+; SSSE3-NEXT: andb $51, %al
+; SSSE3-NEXT: shrb $2, %r8b
+; SSSE3-NEXT: andb $51, %r8b
+; SSSE3-NEXT: addb %al, %r8b
+; SSSE3-NEXT: movb %r8b, %al
+; SSSE3-NEXT: shrb $4, %al
+; SSSE3-NEXT: addb %r8b, %al
+; SSSE3-NEXT: andb $15, %al
+; SSSE3-NEXT: movzbl %al, %eax
+; SSSE3-NEXT: movd %eax, %xmm3
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSSE3-NEXT: movb %sil, %al
+; SSSE3-NEXT: shrb %al
+; SSSE3-NEXT: andb $85, %al
+; SSSE3-NEXT: subb %al, %sil
+; SSSE3-NEXT: movb %sil, %al
+; SSSE3-NEXT: andb $51, %al
+; SSSE3-NEXT: shrb $2, %sil
+; SSSE3-NEXT: andb $51, %sil
+; SSSE3-NEXT: addb %al, %sil
+; SSSE3-NEXT: movb %sil, %al
+; SSSE3-NEXT: shrb $4, %al
+; SSSE3-NEXT: addb %sil, %al
+; SSSE3-NEXT: andb $15, %al
+; SSSE3-NEXT: movzbl %al, %eax
+; SSSE3-NEXT: movd %eax, %xmm4
+; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT: movb %al, %cl
+; SSSE3-NEXT: shrb %cl
+; SSSE3-NEXT: andb $85, %cl
+; SSSE3-NEXT: subb %cl, %al
+; SSSE3-NEXT: movb %al, %cl
+; SSSE3-NEXT: andb $51, %cl
+; SSSE3-NEXT: shrb $2, %al
+; SSSE3-NEXT: andb $51, %al
+; SSSE3-NEXT: addb %cl, %al
+; SSSE3-NEXT: movb %al, %cl
+; SSSE3-NEXT: shrb $4, %cl
+; SSSE3-NEXT: addb %al, %cl
+; SSSE3-NEXT: andb $15, %cl
+; SSSE3-NEXT: movzbl %cl, %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: popq %rbx
+; SSSE3-NEXT: popq %rbp
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: testv16i8:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrb $1, %xmm0, %eax
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb %cl
+; SSE41-NEXT: andb $85, %cl
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: andb $51, %cl
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: andb $51, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $4, %cl
+; SSE41-NEXT: addb %al, %cl
+; SSE41-NEXT: andb $15, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pextrb $0, %xmm0, %ecx
+; SSE41-NEXT: movb %cl, %dl
+; SSE41-NEXT: shrb %dl
+; SSE41-NEXT: andb $85, %dl
+; SSE41-NEXT: subb %dl, %cl
+; SSE41-NEXT: movb %cl, %dl
+; SSE41-NEXT: andb $51, %dl
+; SSE41-NEXT: shrb $2, %cl
+; SSE41-NEXT: andb $51, %cl
+; SSE41-NEXT: addb %dl, %cl
+; SSE41-NEXT: movb %cl, %dl
+; SSE41-NEXT: shrb $4, %dl
+; SSE41-NEXT: addb %cl, %dl
+; SSE41-NEXT: andb $15, %dl
+; SSE41-NEXT: movzbl %dl, %ecx
+; SSE41-NEXT: movd %ecx, %xmm1
+; SSE41-NEXT: pinsrb $1, %eax, %xmm1
+; SSE41-NEXT: pextrb $2, %xmm0, %eax
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb %cl
+; SSE41-NEXT: andb $85, %cl
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: andb $51, %cl
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: andb $51, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $4, %cl
+; SSE41-NEXT: addb %al, %cl
+; SSE41-NEXT: andb $15, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $2, %eax, %xmm1
+; SSE41-NEXT: pextrb $3, %xmm0, %eax
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb %cl
+; SSE41-NEXT: andb $85, %cl
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: andb $51, %cl
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: andb $51, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $4, %cl
+; SSE41-NEXT: addb %al, %cl
+; SSE41-NEXT: andb $15, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $3, %eax, %xmm1
+; SSE41-NEXT: pextrb $4, %xmm0, %eax
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb %cl
+; SSE41-NEXT: andb $85, %cl
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: andb $51, %cl
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: andb $51, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $4, %cl
+; SSE41-NEXT: addb %al, %cl
+; SSE41-NEXT: andb $15, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $4, %eax, %xmm1
+; SSE41-NEXT: pextrb $5, %xmm0, %eax
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb %cl
+; SSE41-NEXT: andb $85, %cl
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: andb $51, %cl
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: andb $51, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $4, %cl
+; SSE41-NEXT: addb %al, %cl
+; SSE41-NEXT: andb $15, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $5, %eax, %xmm1
+; SSE41-NEXT: pextrb $6, %xmm0, %eax
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb %cl
+; SSE41-NEXT: andb $85, %cl
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: andb $51, %cl
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: andb $51, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $4, %cl
+; SSE41-NEXT: addb %al, %cl
+; SSE41-NEXT: andb $15, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $6, %eax, %xmm1
+; SSE41-NEXT: pextrb $7, %xmm0, %eax
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb %cl
+; SSE41-NEXT: andb $85, %cl
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: andb $51, %cl
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: andb $51, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $4, %cl
+; SSE41-NEXT: addb %al, %cl
+; SSE41-NEXT: andb $15, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $7, %eax, %xmm1
+; SSE41-NEXT: pextrb $8, %xmm0, %eax
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb %cl
+; SSE41-NEXT: andb $85, %cl
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: andb $51, %cl
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: andb $51, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $4, %cl
+; SSE41-NEXT: addb %al, %cl
+; SSE41-NEXT: andb $15, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $8, %eax, %xmm1
+; SSE41-NEXT: pextrb $9, %xmm0, %eax
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb %cl
+; SSE41-NEXT: andb $85, %cl
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: andb $51, %cl
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: andb $51, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $4, %cl
+; SSE41-NEXT: addb %al, %cl
+; SSE41-NEXT: andb $15, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $9, %eax, %xmm1
+; SSE41-NEXT: pextrb $10, %xmm0, %eax
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb %cl
+; SSE41-NEXT: andb $85, %cl
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: andb $51, %cl
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: andb $51, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $4, %cl
+; SSE41-NEXT: addb %al, %cl
+; SSE41-NEXT: andb $15, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $10, %eax, %xmm1
+; SSE41-NEXT: pextrb $11, %xmm0, %eax
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb %cl
+; SSE41-NEXT: andb $85, %cl
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: andb $51, %cl
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: andb $51, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $4, %cl
+; SSE41-NEXT: addb %al, %cl
+; SSE41-NEXT: andb $15, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $11, %eax, %xmm1
+; SSE41-NEXT: pextrb $12, %xmm0, %eax
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb %cl
+; SSE41-NEXT: andb $85, %cl
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: andb $51, %cl
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: andb $51, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $4, %cl
+; SSE41-NEXT: addb %al, %cl
+; SSE41-NEXT: andb $15, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $12, %eax, %xmm1
+; SSE41-NEXT: pextrb $13, %xmm0, %eax
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb %cl
+; SSE41-NEXT: andb $85, %cl
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: andb $51, %cl
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: andb $51, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $4, %cl
+; SSE41-NEXT: addb %al, %cl
+; SSE41-NEXT: andb $15, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $13, %eax, %xmm1
+; SSE41-NEXT: pextrb $14, %xmm0, %eax
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb %cl
+; SSE41-NEXT: andb $85, %cl
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: andb $51, %cl
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: andb $51, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $4, %cl
+; SSE41-NEXT: addb %al, %cl
+; SSE41-NEXT: andb $15, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $14, %eax, %xmm1
+; SSE41-NEXT: pextrb $15, %xmm0, %eax
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb %cl
+; SSE41-NEXT: andb $85, %cl
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: andb $51, %cl
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: andb $51, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $4, %cl
+; SSE41-NEXT: addb %al, %cl
+; SSE41-NEXT: andb $15, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $15, %eax, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: testv16i8:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrb $1, %xmm0, %eax
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb %cl
+; AVX-NEXT: andb $85, %cl
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: andb $51, %cl
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: andb $51, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $4, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: andb $15, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX-NEXT: movb %cl, %dl
+; AVX-NEXT: shrb %dl
+; AVX-NEXT: andb $85, %dl
+; AVX-NEXT: subb %dl, %cl
+; AVX-NEXT: movb %cl, %dl
+; AVX-NEXT: andb $51, %dl
+; AVX-NEXT: shrb $2, %cl
+; AVX-NEXT: andb $51, %cl
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movb %cl, %dl
+; AVX-NEXT: shrb $4, %dl
+; AVX-NEXT: addb %cl, %dl
+; AVX-NEXT: andb $15, %dl
+; AVX-NEXT: movzbl %dl, %ecx
+; AVX-NEXT: vmovd %ecx, %xmm1
+; AVX-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $2, %xmm0, %eax
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb %cl
+; AVX-NEXT: andb $85, %cl
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: andb $51, %cl
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: andb $51, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $4, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: andb $15, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $3, %xmm0, %eax
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb %cl
+; AVX-NEXT: andb $85, %cl
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: andb $51, %cl
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: andb $51, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $4, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: andb $15, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $4, %xmm0, %eax
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb %cl
+; AVX-NEXT: andb $85, %cl
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: andb $51, %cl
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: andb $51, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $4, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: andb $15, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $5, %xmm0, %eax
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb %cl
+; AVX-NEXT: andb $85, %cl
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: andb $51, %cl
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: andb $51, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $4, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: andb $15, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $6, %xmm0, %eax
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb %cl
+; AVX-NEXT: andb $85, %cl
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: andb $51, %cl
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: andb $51, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $4, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: andb $15, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $7, %xmm0, %eax
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb %cl
+; AVX-NEXT: andb $85, %cl
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: andb $51, %cl
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: andb $51, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $4, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: andb $15, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $8, %xmm0, %eax
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb %cl
+; AVX-NEXT: andb $85, %cl
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: andb $51, %cl
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: andb $51, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $4, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: andb $15, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $9, %xmm0, %eax
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb %cl
+; AVX-NEXT: andb $85, %cl
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: andb $51, %cl
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: andb $51, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $4, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: andb $15, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $10, %xmm0, %eax
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb %cl
+; AVX-NEXT: andb $85, %cl
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: andb $51, %cl
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: andb $51, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $4, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: andb $15, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $11, %xmm0, %eax
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb %cl
+; AVX-NEXT: andb $85, %cl
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: andb $51, %cl
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: andb $51, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $4, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: andb $15, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $12, %xmm0, %eax
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb %cl
+; AVX-NEXT: andb $85, %cl
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: andb $51, %cl
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: andb $51, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $4, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: andb $15, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $13, %xmm0, %eax
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb %cl
+; AVX-NEXT: andb $85, %cl
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: andb $51, %cl
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: andb $51, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $4, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: andb $15, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $14, %xmm0, %eax
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb %cl
+; AVX-NEXT: andb $85, %cl
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: andb $51, %cl
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: andb $51, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $4, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: andb $15, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $15, %xmm0, %eax
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb %cl
+; AVX-NEXT: andb $85, %cl
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: andb $51, %cl
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: andb $51, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $4, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: andb $15, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %out = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %in)
+ ret <16 x i8> %out
+}
+
+declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
+declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>)
+declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16>)
+declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>)
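
For reference, every scalarized lane in the checks above is run through the same parallel bit-count trick: subtract the shifted-and-masked copy (andb $85), sum adjacent 2-bit fields (andb $51), then fold the nibbles together (shrb $4, andb $15). A minimal C sketch of the per-byte form — the popcount8 name is made up here and nothing below is part of the patch:

static unsigned char popcount8(unsigned char v) {
  v = v - ((v >> 1) & 0x55);          /* andb $85 on the shifted copy    */
  v = (v & 0x33) + ((v >> 2) & 0x33); /* andb $51: pairs of 2-bit sums   */
  v = (v + (v >> 4)) & 0x0F;          /* shrb $4, andb $15: final total  */
  return v;
}

The wider element types run the same steps with the masks widened to 0x5555..., 0x3333... and 0x0f0f... at the matching width, as the 32- and 64-bit sequences below show.
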
--- /dev/null
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+
+target triple = "x86_64-unknown-unknown"
+
+define <4 x i64> @testv4i64(<4 x i64> %in) {
+; AVX1-LABEL: testv4i64:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrq $1, %xmm1, %rdx
+; AVX1-NEXT: movq %rdx, %rax
+; AVX1-NEXT: shrq %rax
+; AVX1-NEXT: movabsq $6148914691236517205, %r8 # imm = 0x5555555555555555
+; AVX1-NEXT: andq %r8, %rax
+; AVX1-NEXT: subq %rax, %rdx
+; AVX1-NEXT: movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
+; AVX1-NEXT: movq %rdx, %rsi
+; AVX1-NEXT: andq %rax, %rsi
+; AVX1-NEXT: shrq $2, %rdx
+; AVX1-NEXT: andq %rax, %rdx
+; AVX1-NEXT: addq %rsi, %rdx
+; AVX1-NEXT: movq %rdx, %rdi
+; AVX1-NEXT: shrq $4, %rdi
+; AVX1-NEXT: addq %rdx, %rdi
+; AVX1-NEXT: movabsq $1085102592571150095, %rdx # imm = 0xF0F0F0F0F0F0F0F
+; AVX1-NEXT: andq %rdx, %rdi
+; AVX1-NEXT: movabsq $72340172838076673, %rsi # imm = 0x101010101010101
+; AVX1-NEXT: imulq %rsi, %rdi
+; AVX1-NEXT: shrq $56, %rdi
+; AVX1-NEXT: vmovq %rdi, %xmm2
+; AVX1-NEXT: vmovq %xmm1, %rcx
+; AVX1-NEXT: movq %rcx, %rdi
+; AVX1-NEXT: shrq %rdi
+; AVX1-NEXT: andq %r8, %rdi
+; AVX1-NEXT: subq %rdi, %rcx
+; AVX1-NEXT: movq %rcx, %rdi
+; AVX1-NEXT: andq %rax, %rdi
+; AVX1-NEXT: shrq $2, %rcx
+; AVX1-NEXT: andq %rax, %rcx
+; AVX1-NEXT: addq %rdi, %rcx
+; AVX1-NEXT: movq %rcx, %rdi
+; AVX1-NEXT: shrq $4, %rdi
+; AVX1-NEXT: addq %rcx, %rdi
+; AVX1-NEXT: andq %rdx, %rdi
+; AVX1-NEXT: imulq %rsi, %rdi
+; AVX1-NEXT: shrq $56, %rdi
+; AVX1-NEXT: vmovq %rdi, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX1-NEXT: movq %rcx, %rdi
+; AVX1-NEXT: shrq %rdi
+; AVX1-NEXT: andq %r8, %rdi
+; AVX1-NEXT: subq %rdi, %rcx
+; AVX1-NEXT: movq %rcx, %rdi
+; AVX1-NEXT: andq %rax, %rdi
+; AVX1-NEXT: shrq $2, %rcx
+; AVX1-NEXT: andq %rax, %rcx
+; AVX1-NEXT: addq %rdi, %rcx
+; AVX1-NEXT: movq %rcx, %rdi
+; AVX1-NEXT: shrq $4, %rdi
+; AVX1-NEXT: addq %rcx, %rdi
+; AVX1-NEXT: andq %rdx, %rdi
+; AVX1-NEXT: imulq %rsi, %rdi
+; AVX1-NEXT: shrq $56, %rdi
+; AVX1-NEXT: vmovq %rdi, %xmm2
+; AVX1-NEXT: vmovq %xmm0, %rcx
+; AVX1-NEXT: movq %rcx, %rdi
+; AVX1-NEXT: shrq %rdi
+; AVX1-NEXT: andq %r8, %rdi
+; AVX1-NEXT: subq %rdi, %rcx
+; AVX1-NEXT: movq %rcx, %rdi
+; AVX1-NEXT: andq %rax, %rdi
+; AVX1-NEXT: shrq $2, %rcx
+; AVX1-NEXT: andq %rax, %rcx
+; AVX1-NEXT: addq %rdi, %rcx
+; AVX1-NEXT: movq %rcx, %rax
+; AVX1-NEXT: shrq $4, %rax
+; AVX1-NEXT: addq %rcx, %rax
+; AVX1-NEXT: andq %rdx, %rax
+; AVX1-NEXT: imulq %rsi, %rax
+; AVX1-NEXT: shrq $56, %rax
+; AVX1-NEXT: vmovq %rax, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv4i64:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm1
+; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpsrlq $2, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpsrlq $4, %ymm0, %ymm1
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $8, %ymm0, %ymm1
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $16, %ymm0, %ymm1
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm1
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %in)
+ ret <4 x i64> %out
+}
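
The AVX1 half of testv4i64 is the same story at quadword width: each element comes out through vpextrq/vmovq, the movabsq constants are the 64-bit masks, and the final imulq/shrq $56 sums the eight byte counts into the low byte. Roughly, per extracted value (again only a sketch; the popcount64 name is not in the patch):

static unsigned long long popcount64(unsigned long long v) {
  v = v - ((v >> 1) & 0x5555555555555555ULL);
  v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
  v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
  return (v * 0x0101010101010101ULL) >> 56; /* imulq then shrq $56 */
}

The AVX2 checks for the same function keep everything in ymm registers instead, broadcasting the same masks with vpbroadcastq and replacing the multiply with a chain of shift-and-add steps.
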
+
+define <8 x i32> @testv8i32(<8 x i32> %in) {
+; AVX1-LABEL: testv8i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrd $1, %xmm1, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $1431655765, %ecx # imm = 0x55555555
+; AVX1-NEXT: subl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $858993459, %ecx # imm = 0x33333333
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $858993459, %eax # imm = 0x33333333
+; AVX1-NEXT: addl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: addl %eax, %ecx
+; AVX1-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F
+; AVX1-NEXT: imull $16843009, %ecx, %eax # imm = 0x1010101
+; AVX1-NEXT: shrl $24, %eax
+; AVX1-NEXT: vmovd %xmm1, %ecx
+; AVX1-NEXT: movl %ecx, %edx
+; AVX1-NEXT: shrl %edx
+; AVX1-NEXT: andl $1431655765, %edx # imm = 0x55555555
+; AVX1-NEXT: subl %edx, %ecx
+; AVX1-NEXT: movl %ecx, %edx
+; AVX1-NEXT: andl $858993459, %edx # imm = 0x33333333
+; AVX1-NEXT: shrl $2, %ecx
+; AVX1-NEXT: andl $858993459, %ecx # imm = 0x33333333
+; AVX1-NEXT: addl %edx, %ecx
+; AVX1-NEXT: movl %ecx, %edx
+; AVX1-NEXT: shrl $4, %edx
+; AVX1-NEXT: addl %ecx, %edx
+; AVX1-NEXT: andl $252645135, %edx # imm = 0xF0F0F0F
+; AVX1-NEXT: imull $16843009, %edx, %ecx # imm = 0x1010101
+; AVX1-NEXT: shrl $24, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $2, %xmm1, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $1431655765, %ecx # imm = 0x55555555
+; AVX1-NEXT: subl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $858993459, %ecx # imm = 0x33333333
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $858993459, %eax # imm = 0x33333333
+; AVX1-NEXT: addl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: addl %eax, %ecx
+; AVX1-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F
+; AVX1-NEXT: imull $16843009, %ecx, %eax # imm = 0x1010101
+; AVX1-NEXT: shrl $24, %eax
+; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $3, %xmm1, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $1431655765, %ecx # imm = 0x55555555
+; AVX1-NEXT: subl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $858993459, %ecx # imm = 0x33333333
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $858993459, %eax # imm = 0x33333333
+; AVX1-NEXT: addl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: addl %eax, %ecx
+; AVX1-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F
+; AVX1-NEXT: imull $16843009, %ecx, %eax # imm = 0x1010101
+; AVX1-NEXT: shrl $24, %eax
+; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm1
+; AVX1-NEXT: vpextrd $1, %xmm0, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $1431655765, %ecx # imm = 0x55555555
+; AVX1-NEXT: subl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $858993459, %ecx # imm = 0x33333333
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $858993459, %eax # imm = 0x33333333
+; AVX1-NEXT: addl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: addl %eax, %ecx
+; AVX1-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F
+; AVX1-NEXT: imull $16843009, %ecx, %eax # imm = 0x1010101
+; AVX1-NEXT: shrl $24, %eax
+; AVX1-NEXT: vmovd %xmm0, %ecx
+; AVX1-NEXT: movl %ecx, %edx
+; AVX1-NEXT: shrl %edx
+; AVX1-NEXT: andl $1431655765, %edx # imm = 0x55555555
+; AVX1-NEXT: subl %edx, %ecx
+; AVX1-NEXT: movl %ecx, %edx
+; AVX1-NEXT: andl $858993459, %edx # imm = 0x33333333
+; AVX1-NEXT: shrl $2, %ecx
+; AVX1-NEXT: andl $858993459, %ecx # imm = 0x33333333
+; AVX1-NEXT: addl %edx, %ecx
+; AVX1-NEXT: movl %ecx, %edx
+; AVX1-NEXT: shrl $4, %edx
+; AVX1-NEXT: addl %ecx, %edx
+; AVX1-NEXT: andl $252645135, %edx # imm = 0xF0F0F0F
+; AVX1-NEXT: imull $16843009, %edx, %ecx # imm = 0x1010101
+; AVX1-NEXT: shrl $24, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $2, %xmm0, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $1431655765, %ecx # imm = 0x55555555
+; AVX1-NEXT: subl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $858993459, %ecx # imm = 0x33333333
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $858993459, %eax # imm = 0x33333333
+; AVX1-NEXT: addl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: addl %eax, %ecx
+; AVX1-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F
+; AVX1-NEXT: imull $16843009, %ecx, %eax # imm = 0x1010101
+; AVX1-NEXT: shrl $24, %eax
+; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $3, %xmm0, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $1431655765, %ecx # imm = 0x55555555
+; AVX1-NEXT: subl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $858993459, %ecx # imm = 0x33333333
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $858993459, %eax # imm = 0x33333333
+; AVX1-NEXT: addl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: addl %eax, %ecx
+; AVX1-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F
+; AVX1-NEXT: imull $16843009, %ecx, %eax # imm = 0x1010101
+; AVX1-NEXT: shrl $24, %eax
+; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv8i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm1
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpsrld $2, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpsrld $4, %ymm0, %ymm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $8, %ymm0, %ymm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $16, %ymm0, %ymm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %in)
+ ret <8 x i32> %out
+}
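
testv8i32 and testv16i16 repeat the pattern at dword and word width; on the AVX1 side the horizontal sum is an imull by 0x1010101 (0x101 at word width) followed by extracting the byte that holds the total, while the AVX2 side stays vectorized and finishes with shift-and-add steps plus one last mask. As a rough intrinsics sketch of that vector sequence — the helper name and the final 0x3F mask are assumptions here (the generated code loads its mask from the constant pool), not something the patch defines:

#include <immintrin.h>

static __m256i popcount_epi32(__m256i v) {
  const __m256i m55 = _mm256_set1_epi32(0x55555555);
  const __m256i m33 = _mm256_set1_epi32(0x33333333);
  const __m256i m0f = _mm256_set1_epi32(0x0F0F0F0F);
  v = _mm256_sub_epi32(v, _mm256_and_si256(_mm256_srli_epi32(v, 1), m55));
  v = _mm256_add_epi32(_mm256_and_si256(v, m33),
                       _mm256_and_si256(_mm256_srli_epi32(v, 2), m33));
  v = _mm256_and_si256(_mm256_add_epi32(v, _mm256_srli_epi32(v, 4)), m0f);
  v = _mm256_add_epi32(v, _mm256_srli_epi32(v, 8));    /* vpsrld $8,  vpaddd */
  v = _mm256_add_epi32(v, _mm256_srli_epi32(v, 16));   /* vpsrld $16, vpaddd */
  return _mm256_and_si256(v, _mm256_set1_epi32(0x3F)); /* keep just the count */
}
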
+
+define <16 x i16> @testv16i16(<16 x i16> %in) {
+; AVX1-LABEL: testv16i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrw $1, %xmm1, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX1-NEXT: subl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX1-NEXT: addl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: addl %eax, %ecx
+; AVX1-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX1-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX1-NEXT: movzbl %ah, %eax # NOREX
+; AVX1-NEXT: vmovd %xmm1, %ecx
+; AVX1-NEXT: movl %ecx, %edx
+; AVX1-NEXT: shrl %edx
+; AVX1-NEXT: andl $21845, %edx # imm = 0x5555
+; AVX1-NEXT: subl %edx, %ecx
+; AVX1-NEXT: movl %ecx, %edx
+; AVX1-NEXT: andl $13107, %edx # imm = 0x3333
+; AVX1-NEXT: shrl $2, %ecx
+; AVX1-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX1-NEXT: addl %edx, %ecx
+; AVX1-NEXT: movl %ecx, %edx
+; AVX1-NEXT: andl $65520, %edx # imm = 0xFFF0
+; AVX1-NEXT: shrl $4, %edx
+; AVX1-NEXT: addl %ecx, %edx
+; AVX1-NEXT: andl $3855, %edx # imm = 0xF0F
+; AVX1-NEXT: imull $257, %edx, %ecx # imm = 0x101
+; AVX1-NEXT: movzbl %ch, %ecx # NOREX
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $2, %xmm1, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX1-NEXT: subl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX1-NEXT: addl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: addl %eax, %ecx
+; AVX1-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX1-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX1-NEXT: movzbl %ah, %eax # NOREX
+; AVX1-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $3, %xmm1, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX1-NEXT: subl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX1-NEXT: addl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: addl %eax, %ecx
+; AVX1-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX1-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX1-NEXT: movzbl %ah, %eax # NOREX
+; AVX1-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $4, %xmm1, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX1-NEXT: subl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX1-NEXT: addl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: addl %eax, %ecx
+; AVX1-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX1-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX1-NEXT: movzbl %ah, %eax # NOREX
+; AVX1-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $5, %xmm1, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX1-NEXT: subl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX1-NEXT: addl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: addl %eax, %ecx
+; AVX1-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX1-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX1-NEXT: movzbl %ah, %eax # NOREX
+; AVX1-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $6, %xmm1, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX1-NEXT: subl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX1-NEXT: addl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: addl %eax, %ecx
+; AVX1-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX1-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX1-NEXT: movzbl %ah, %eax # NOREX
+; AVX1-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $7, %xmm1, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX1-NEXT: subl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX1-NEXT: addl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: addl %eax, %ecx
+; AVX1-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX1-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX1-NEXT: movzbl %ah, %eax # NOREX
+; AVX1-NEXT: vpinsrw $7, %eax, %xmm2, %xmm1
+; AVX1-NEXT: vpextrw $1, %xmm0, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX1-NEXT: subl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX1-NEXT: addl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: addl %eax, %ecx
+; AVX1-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX1-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX1-NEXT: movzbl %ah, %eax # NOREX
+; AVX1-NEXT: vmovd %xmm0, %ecx
+; AVX1-NEXT: movl %ecx, %edx
+; AVX1-NEXT: shrl %edx
+; AVX1-NEXT: andl $21845, %edx # imm = 0x5555
+; AVX1-NEXT: subl %edx, %ecx
+; AVX1-NEXT: movl %ecx, %edx
+; AVX1-NEXT: andl $13107, %edx # imm = 0x3333
+; AVX1-NEXT: shrl $2, %ecx
+; AVX1-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX1-NEXT: addl %edx, %ecx
+; AVX1-NEXT: movl %ecx, %edx
+; AVX1-NEXT: andl $65520, %edx # imm = 0xFFF0
+; AVX1-NEXT: shrl $4, %edx
+; AVX1-NEXT: addl %ecx, %edx
+; AVX1-NEXT: andl $3855, %edx # imm = 0xF0F
+; AVX1-NEXT: imull $257, %edx, %ecx # imm = 0x101
+; AVX1-NEXT: movzbl %ch, %ecx # NOREX
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $2, %xmm0, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX1-NEXT: subl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX1-NEXT: addl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: addl %eax, %ecx
+; AVX1-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX1-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX1-NEXT: movzbl %ah, %eax # NOREX
+; AVX1-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $3, %xmm0, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX1-NEXT: subl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX1-NEXT: addl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: addl %eax, %ecx
+; AVX1-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX1-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX1-NEXT: movzbl %ah, %eax # NOREX
+; AVX1-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $4, %xmm0, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX1-NEXT: subl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX1-NEXT: addl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: addl %eax, %ecx
+; AVX1-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX1-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX1-NEXT: movzbl %ah, %eax # NOREX
+; AVX1-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $5, %xmm0, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX1-NEXT: subl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX1-NEXT: addl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: addl %eax, %ecx
+; AVX1-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX1-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX1-NEXT: movzbl %ah, %eax # NOREX
+; AVX1-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $6, %xmm0, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX1-NEXT: subl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX1-NEXT: addl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: addl %eax, %ecx
+; AVX1-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX1-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX1-NEXT: movzbl %ah, %eax # NOREX
+; AVX1-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $7, %xmm0, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX1-NEXT: subl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX1-NEXT: shrl $2, %eax
+; AVX1-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX1-NEXT: addl %ecx, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: addl %eax, %ecx
+; AVX1-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX1-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX1-NEXT: movzbl %ah, %eax # NOREX
+; AVX1-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv16i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrw $1, %xmm1, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX2-NEXT: subl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX2-NEXT: shrl $2, %eax
+; AVX2-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX2-NEXT: addl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: addl %eax, %ecx
+; AVX2-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX2-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX2-NEXT: movzbl %ah, %eax # NOREX
+; AVX2-NEXT: vmovd %xmm1, %ecx
+; AVX2-NEXT: movl %ecx, %edx
+; AVX2-NEXT: shrl %edx
+; AVX2-NEXT: andl $21845, %edx # imm = 0x5555
+; AVX2-NEXT: subl %edx, %ecx
+; AVX2-NEXT: movl %ecx, %edx
+; AVX2-NEXT: andl $13107, %edx # imm = 0x3333
+; AVX2-NEXT: shrl $2, %ecx
+; AVX2-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX2-NEXT: addl %edx, %ecx
+; AVX2-NEXT: movl %ecx, %edx
+; AVX2-NEXT: andl $65520, %edx # imm = 0xFFF0
+; AVX2-NEXT: shrl $4, %edx
+; AVX2-NEXT: addl %ecx, %edx
+; AVX2-NEXT: andl $3855, %edx # imm = 0xF0F
+; AVX2-NEXT: imull $257, %edx, %ecx # imm = 0x101
+; AVX2-NEXT: movzbl %ch, %ecx # NOREX
+; AVX2-NEXT: vmovd %ecx, %xmm2
+; AVX2-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $2, %xmm1, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX2-NEXT: subl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX2-NEXT: shrl $2, %eax
+; AVX2-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX2-NEXT: addl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: addl %eax, %ecx
+; AVX2-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX2-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX2-NEXT: movzbl %ah, %eax # NOREX
+; AVX2-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $3, %xmm1, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX2-NEXT: subl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX2-NEXT: shrl $2, %eax
+; AVX2-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX2-NEXT: addl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: addl %eax, %ecx
+; AVX2-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX2-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX2-NEXT: movzbl %ah, %eax # NOREX
+; AVX2-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $4, %xmm1, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX2-NEXT: subl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX2-NEXT: shrl $2, %eax
+; AVX2-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX2-NEXT: addl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: addl %eax, %ecx
+; AVX2-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX2-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX2-NEXT: movzbl %ah, %eax # NOREX
+; AVX2-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $5, %xmm1, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX2-NEXT: subl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX2-NEXT: shrl $2, %eax
+; AVX2-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX2-NEXT: addl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: addl %eax, %ecx
+; AVX2-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX2-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX2-NEXT: movzbl %ah, %eax # NOREX
+; AVX2-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $6, %xmm1, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX2-NEXT: subl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX2-NEXT: shrl $2, %eax
+; AVX2-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX2-NEXT: addl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: addl %eax, %ecx
+; AVX2-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX2-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX2-NEXT: movzbl %ah, %eax # NOREX
+; AVX2-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $7, %xmm1, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX2-NEXT: subl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX2-NEXT: shrl $2, %eax
+; AVX2-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX2-NEXT: addl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: addl %eax, %ecx
+; AVX2-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX2-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX2-NEXT: movzbl %ah, %eax # NOREX
+; AVX2-NEXT: vpinsrw $7, %eax, %xmm2, %xmm1
+; AVX2-NEXT: vpextrw $1, %xmm0, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX2-NEXT: subl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX2-NEXT: shrl $2, %eax
+; AVX2-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX2-NEXT: addl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: addl %eax, %ecx
+; AVX2-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX2-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX2-NEXT: movzbl %ah, %eax # NOREX
+; AVX2-NEXT: vmovd %xmm0, %ecx
+; AVX2-NEXT: movl %ecx, %edx
+; AVX2-NEXT: shrl %edx
+; AVX2-NEXT: andl $21845, %edx # imm = 0x5555
+; AVX2-NEXT: subl %edx, %ecx
+; AVX2-NEXT: movl %ecx, %edx
+; AVX2-NEXT: andl $13107, %edx # imm = 0x3333
+; AVX2-NEXT: shrl $2, %ecx
+; AVX2-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX2-NEXT: addl %edx, %ecx
+; AVX2-NEXT: movl %ecx, %edx
+; AVX2-NEXT: andl $65520, %edx # imm = 0xFFF0
+; AVX2-NEXT: shrl $4, %edx
+; AVX2-NEXT: addl %ecx, %edx
+; AVX2-NEXT: andl $3855, %edx # imm = 0xF0F
+; AVX2-NEXT: imull $257, %edx, %ecx # imm = 0x101
+; AVX2-NEXT: movzbl %ch, %ecx # NOREX
+; AVX2-NEXT: vmovd %ecx, %xmm2
+; AVX2-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $2, %xmm0, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX2-NEXT: subl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX2-NEXT: shrl $2, %eax
+; AVX2-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX2-NEXT: addl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: addl %eax, %ecx
+; AVX2-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX2-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX2-NEXT: movzbl %ah, %eax # NOREX
+; AVX2-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $3, %xmm0, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX2-NEXT: subl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX2-NEXT: shrl $2, %eax
+; AVX2-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX2-NEXT: addl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: addl %eax, %ecx
+; AVX2-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX2-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX2-NEXT: movzbl %ah, %eax # NOREX
+; AVX2-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $4, %xmm0, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX2-NEXT: subl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX2-NEXT: shrl $2, %eax
+; AVX2-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX2-NEXT: addl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: addl %eax, %ecx
+; AVX2-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX2-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX2-NEXT: movzbl %ah, %eax # NOREX
+; AVX2-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $5, %xmm0, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX2-NEXT: subl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX2-NEXT: shrl $2, %eax
+; AVX2-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX2-NEXT: addl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: addl %eax, %ecx
+; AVX2-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX2-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX2-NEXT: movzbl %ah, %eax # NOREX
+; AVX2-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $6, %xmm0, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX2-NEXT: subl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX2-NEXT: shrl $2, %eax
+; AVX2-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX2-NEXT: addl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: addl %eax, %ecx
+; AVX2-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX2-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX2-NEXT: movzbl %ah, %eax # NOREX
+; AVX2-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $7, %xmm0, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: andl $21845, %ecx # imm = 0x5555
+; AVX2-NEXT: subl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $13107, %ecx # imm = 0x3333
+; AVX2-NEXT: shrl $2, %eax
+; AVX2-NEXT: andl $13107, %eax # imm = 0x3333
+; AVX2-NEXT: addl %ecx, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: andl $65520, %ecx # imm = 0xFFF0
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: addl %eax, %ecx
+; AVX2-NEXT: andl $3855, %ecx # imm = 0xF0F
+; AVX2-NEXT: imull $257, %ecx, %eax # imm = 0x101
+; AVX2-NEXT: movzbl %ah, %eax # NOREX
+; AVX2-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> %in)
+ ret <16 x i16> %out
+}
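Each per-element block in the AVX2 checks above is the classic shift-and-mask (SWAR) population count applied to a zero-extended 16-bit value: the 0x5555/0x3333/0x0F0F masks build 2-, 4-, and 8-bit partial sums, and the final `imull $257` plus `movzbl %ah` adds the two byte sums and pulls the result out of the high byte. A minimal C sketch of what one such extract/popcount/insert block computes (the helper name is illustrative, not part of the test):

    // Scalar equivalent of one per-word block in the checked assembly.
    static unsigned popcnt16_swar(unsigned short v) {
        unsigned x = v;
        x -= (x >> 1) & 0x5555;                  // 2-bit field sums
        x = (x & 0x3333) + ((x >> 2) & 0x3333);  // 4-bit field sums
        x = (x + (x >> 4)) & 0x0F0F;             // per-byte sums, each <= 8
        return ((x * 0x101) >> 8) & 0xFF;        // imull $257 + movzbl %ah: add both bytes
    }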
+
+define <32 x i8> @testv32i8(<32 x i8> %in) {
+; AVX1-LABEL: testv32i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrb $1, %xmm1, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX1-NEXT: movb %cl, %dl
+; AVX1-NEXT: shrb %dl
+; AVX1-NEXT: andb $85, %dl
+; AVX1-NEXT: subb %dl, %cl
+; AVX1-NEXT: movb %cl, %dl
+; AVX1-NEXT: andb $51, %dl
+; AVX1-NEXT: shrb $2, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: addb %dl, %cl
+; AVX1-NEXT: movb %cl, %dl
+; AVX1-NEXT: shrb $4, %dl
+; AVX1-NEXT: addb %cl, %dl
+; AVX1-NEXT: andb $15, %dl
+; AVX1-NEXT: movzbl %dl, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $2, %xmm1, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $3, %xmm1, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $4, %xmm1, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $5, %xmm1, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $6, %xmm1, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $7, %xmm1, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $8, %xmm1, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $9, %xmm1, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $10, %xmm1, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $11, %xmm1, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $12, %xmm1, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $13, %xmm1, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $14, %xmm1, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $15, %xmm1, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
+; AVX1-NEXT: vpextrb $1, %xmm0, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX1-NEXT: movb %cl, %dl
+; AVX1-NEXT: shrb %dl
+; AVX1-NEXT: andb $85, %dl
+; AVX1-NEXT: subb %dl, %cl
+; AVX1-NEXT: movb %cl, %dl
+; AVX1-NEXT: andb $51, %dl
+; AVX1-NEXT: shrb $2, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: addb %dl, %cl
+; AVX1-NEXT: movb %cl, %dl
+; AVX1-NEXT: shrb $4, %dl
+; AVX1-NEXT: addb %cl, %dl
+; AVX1-NEXT: andb $15, %dl
+; AVX1-NEXT: movzbl %dl, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $2, %xmm0, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $3, %xmm0, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $4, %xmm0, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $5, %xmm0, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $6, %xmm0, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $7, %xmm0, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $8, %xmm0, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $9, %xmm0, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $10, %xmm0, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $11, %xmm0, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $12, %xmm0, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $13, %xmm0, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $14, %xmm0, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $15, %xmm0, %eax
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb %cl
+; AVX1-NEXT: andb $85, %cl
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: andb $51, %cl
+; AVX1-NEXT: shrb $2, %al
+; AVX1-NEXT: andb $51, %al
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: movb %al, %cl
+; AVX1-NEXT: shrb $4, %cl
+; AVX1-NEXT: addb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv32i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrb $1, %xmm1, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX2-NEXT: movb %cl, %dl
+; AVX2-NEXT: shrb %dl
+; AVX2-NEXT: andb $85, %dl
+; AVX2-NEXT: subb %dl, %cl
+; AVX2-NEXT: movb %cl, %dl
+; AVX2-NEXT: andb $51, %dl
+; AVX2-NEXT: shrb $2, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: addb %dl, %cl
+; AVX2-NEXT: movb %cl, %dl
+; AVX2-NEXT: shrb $4, %dl
+; AVX2-NEXT: addb %cl, %dl
+; AVX2-NEXT: andb $15, %dl
+; AVX2-NEXT: movzbl %dl, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm2
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $2, %xmm1, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $3, %xmm1, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $4, %xmm1, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $5, %xmm1, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $6, %xmm1, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $7, %xmm1, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $8, %xmm1, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $9, %xmm1, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $10, %xmm1, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $11, %xmm1, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $12, %xmm1, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $13, %xmm1, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $14, %xmm1, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $15, %xmm1, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
+; AVX2-NEXT: vpextrb $1, %xmm0, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX2-NEXT: movb %cl, %dl
+; AVX2-NEXT: shrb %dl
+; AVX2-NEXT: andb $85, %dl
+; AVX2-NEXT: subb %dl, %cl
+; AVX2-NEXT: movb %cl, %dl
+; AVX2-NEXT: andb $51, %dl
+; AVX2-NEXT: shrb $2, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: addb %dl, %cl
+; AVX2-NEXT: movb %cl, %dl
+; AVX2-NEXT: shrb $4, %dl
+; AVX2-NEXT: addb %cl, %dl
+; AVX2-NEXT: andb $15, %dl
+; AVX2-NEXT: movzbl %dl, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm2
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $2, %xmm0, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $3, %xmm0, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $4, %xmm0, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $5, %xmm0, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $6, %xmm0, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $7, %xmm0, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $8, %xmm0, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $9, %xmm0, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $10, %xmm0, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $11, %xmm0, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $12, %xmm0, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $13, %xmm0, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $14, %xmm0, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $15, %xmm0, %eax
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb %cl
+; AVX2-NEXT: andb $85, %cl
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: andb $51, %cl
+; AVX2-NEXT: shrb $2, %al
+; AVX2-NEXT: andb $51, %al
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: movb %al, %cl
+; AVX2-NEXT: shrb $4, %cl
+; AVX2-NEXT: addb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <32 x i8> @llvm.ctpop.v32i8(<32 x i8> %in)
+ ret <32 x i8> %out
+}
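The byte version checked above, identical for the AVX1 and AVX2 prefixes, is the same shift-and-mask reduction run on each extracted byte, using the 8-bit masks 0x55, 0x33, and 0x0F; no multiply step is needed because a single byte only requires three rounds. A sketch of the per-byte computation (again with an illustrative helper name, not code from this patch):

    // Scalar equivalent of one per-byte block in the checked assembly.
    static unsigned char popcnt8_swar(unsigned char v) {
        v -= (v >> 1) & 0x55;                 // 2-bit field sums
        v = (v & 0x33) + ((v >> 2) & 0x33);   // 4-bit field sums
        v = (v + (v >> 4)) & 0x0F;            // final count, 0..8
        return v;
    }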
+
+declare <4 x i64> @llvm.ctpop.v4i64(<4 x i64>)
+declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>)
+declare <16 x i16> @llvm.ctpop.v16i16(<16 x i16>)
+declare <32 x i8> @llvm.ctpop.v32i8(<32 x i8>)
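For contrast with the fully scalarized sequences these tests pin down, the usual vector approach to per-byte popcount on SSSE3-class hardware splits each byte into nibbles and counts them with a 16-entry PSHUFB lookup table. The sketch below is only an illustration of that well-known technique, not code from this patch, and the helper name is hypothetical:

    #include <tmmintrin.h>  // SSSE3 intrinsics

    // Per-byte popcount of one 128-bit lane via a nibble lookup table.
    static __m128i popcnt_epi8_lut(__m128i v) {
        const __m128i lut  = _mm_setr_epi8(0,1,1,2, 1,2,2,3, 1,2,2,3, 2,3,3,4);
        const __m128i mask = _mm_set1_epi8(0x0f);
        __m128i lo = _mm_and_si128(v, mask);                    // low nibbles
        __m128i hi = _mm_and_si128(_mm_srli_epi16(v, 4), mask); // high nibbles
        return _mm_add_epi8(_mm_shuffle_epi8(lut, lo),          // count each nibble
                            _mm_shuffle_epi8(lut, hi));         // and sum per byte
    }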