From 4af289d0f23fa65e77be51982c4bb0b9a5645512 Mon Sep 17 00:00:00 2001 From: Simon Pilgrim Date: Mon, 8 Jun 2015 19:58:43 +0000 Subject: [PATCH] [X86][SSE] Added lzcnt vector tests. llvm-svn: 239333 --- llvm/test/CodeGen/X86/vector-lzcnt-128.ll | 1786 +++++++++++++++++++++++++++++ llvm/test/CodeGen/X86/vector-lzcnt-256.ll | 1224 ++++++++++++++++++++ 2 files changed, 3010 insertions(+) diff --git a/llvm/test/CodeGen/X86/vector-lzcnt-128.ll b/llvm/test/CodeGen/X86/vector-lzcnt-128.ll index c0d8c53..b43188b 100644 --- a/llvm/test/CodeGen/X86/vector-lzcnt-128.ll +++ b/llvm/test/CodeGen/X86/vector-lzcnt-128.ll @@ -7,6 +7,1792 @@ target triple = "x86_64-unknown-unknown" +define <2 x i64> @testv2i64(<2 x i64> %in) { +; SSE2-LABEL: testv2i64: +; SSE2: # BB#0: +; SSE2-NEXT: movd %xmm0, %rax +; SSE2-NEXT: bsrq %rax, %rax +; SSE2-NEXT: movl $127, %ecx +; SSE2-NEXT: cmoveq %rcx, %rax +; SSE2-NEXT: xorq $63, %rax +; SSE2-NEXT: movd %rax, %xmm1 +; SSE2-NEXT: pshufd $78, %xmm0, %xmm0 # xmm0 = xmm0[2,3,0,1] +; SSE2-NEXT: movd %xmm0, %rax +; SSE2-NEXT: bsrq %rax, %rax +; SSE2-NEXT: cmoveq %rcx, %rax +; SSE2-NEXT: xorq $63, %rax +; SSE2-NEXT: movd %rax, %xmm0 +; SSE2-NEXT: punpcklqdq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0] +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSE3-LABEL: testv2i64: +; SSE3: # BB#0: +; SSE3-NEXT: movd %xmm0, %rax +; SSE3-NEXT: bsrq %rax, %rax +; SSE3-NEXT: movl $127, %ecx +; SSE3-NEXT: cmoveq %rcx, %rax +; SSE3-NEXT: xorq $63, %rax +; SSE3-NEXT: movd %rax, %xmm1 +; SSE3-NEXT: pshufd $78, %xmm0, %xmm0 # xmm0 = xmm0[2,3,0,1] +; SSE3-NEXT: movd %xmm0, %rax +; SSE3-NEXT: bsrq %rax, %rax +; SSE3-NEXT: cmoveq %rcx, %rax +; SSE3-NEXT: xorq $63, %rax +; SSE3-NEXT: movd %rax, %xmm0 +; SSE3-NEXT: punpcklqdq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0] +; SSE3-NEXT: movdqa %xmm1, %xmm0 +; SSE3-NEXT: retq +; +; SSSE3-LABEL: testv2i64: +; SSSE3: # BB#0: +; SSSE3-NEXT: movd %xmm0, %rax +; SSSE3-NEXT: bsrq %rax, %rax +; SSSE3-NEXT: movl $127, %ecx +; SSSE3-NEXT: cmoveq %rcx, %rax +; SSSE3-NEXT: xorq $63, %rax +; SSSE3-NEXT: movd %rax, %xmm1 +; SSSE3-NEXT: pshufd $78, %xmm0, %xmm0 # xmm0 = xmm0[2,3,0,1] +; SSSE3-NEXT: movd %xmm0, %rax +; SSSE3-NEXT: bsrq %rax, %rax +; SSSE3-NEXT: cmoveq %rcx, %rax +; SSSE3-NEXT: xorq $63, %rax +; SSSE3-NEXT: movd %rax, %xmm0 +; SSSE3-NEXT: punpcklqdq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0] +; SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSSE3-NEXT: retq + +; +; SSE41-LABEL: testv2i64: +; SSE41: # BB#0: +; SSE41-NEXT: pextrq $1, %xmm0, %rax +; SSE41-NEXT: bsrq %rax, %rax +; SSE41-NEXT: movl $127, %ecx +; SSE41-NEXT: cmoveq %rcx, %rax +; SSE41-NEXT: xorq $63, %rax +; SSE41-NEXT: movd %rax, %xmm1 +; SSE41-NEXT: movd %xmm0, %rax +; SSE41-NEXT: bsrq %rax, %rax +; SSE41-NEXT: cmoveq %rcx, %rax +; SSE41-NEXT: xorq $63, %rax +; SSE41-NEXT: movd %rax, %xmm0 +; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE41-NEXT: retq +; +; AVX-LABEL: testv2i64: +; AVX: # BB#0: +; AVX-NEXT: vpextrq $1, %xmm0, %rax +; AVX-NEXT: bsrq %rax, %rax +; AVX-NEXT: movl $127, %ecx +; AVX-NEXT: cmoveq %rcx, %rax +; AVX-NEXT: xorq $63, %rax +; AVX-NEXT: vmovq %rax, %xmm1 +; AVX-NEXT: vmovq %xmm0, %rax +; AVX-NEXT: bsrq %rax, %rax +; AVX-NEXT: cmoveq %rcx, %rax +; AVX-NEXT: xorq $63, %rax +; AVX-NEXT: vmovq %rax, %xmm0 +; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX-NEXT: retq + %out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %in, i1 0) + ret <2 x i64> %out +} + +define <2 x i64> @testv2i64u(<2 x i64> %in) { +; SSE2-LABEL: testv2i64u: +; SSE2: # BB#0: +; SSE2-NEXT: movd %xmm0, 
%rax +; SSE2-NEXT: bsrq %rax, %rax +; SSE2-NEXT: xorq $63, %rax +; SSE2-NEXT: movd %rax, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSE2-NEXT: movd %xmm0, %rax +; SSE2-NEXT: bsrq %rax, %rax +; SSE2-NEXT: xorq $63, %rax +; SSE2-NEXT: movd %rax, %xmm0 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSE3-LABEL: testv2i64u: +; SSE3: # BB#0: +; SSE3-NEXT: movd %xmm0, %rax +; SSE3-NEXT: bsrq %rax, %rax +; SSE3-NEXT: xorq $63, %rax +; SSE3-NEXT: movd %rax, %xmm1 +; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSE3-NEXT: movd %xmm0, %rax +; SSE3-NEXT: bsrq %rax, %rax +; SSE3-NEXT: xorq $63, %rax +; SSE3-NEXT: movd %rax, %xmm0 +; SSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; SSE3-NEXT: movdqa %xmm1, %xmm0 +; SSE3-NEXT: retq +; +; SSSE3-LABEL: testv2i64u: +; SSSE3: # BB#0: +; SSSE3-NEXT: movd %xmm0, %rax +; SSSE3-NEXT: bsrq %rax, %rax +; SSSE3-NEXT: xorq $63, %rax +; SSSE3-NEXT: movd %rax, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSSE3-NEXT: movd %xmm0, %rax +; SSSE3-NEXT: bsrq %rax, %rax +; SSSE3-NEXT: xorq $63, %rax +; SSSE3-NEXT: movd %rax, %xmm0 +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSSE3-NEXT: retq +; +; SSE41-LABEL: testv2i64u: +; SSE41: # BB#0: +; SSE41-NEXT: pextrq $1, %xmm0, %rax +; SSE41-NEXT: bsrq %rax, %rax +; SSE41-NEXT: xorq $63, %rax +; SSE41-NEXT: movd %rax, %xmm1 +; SSE41-NEXT: movd %xmm0, %rax +; SSE41-NEXT: bsrq %rax, %rax +; SSE41-NEXT: xorq $63, %rax +; SSE41-NEXT: movd %rax, %xmm0 +; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE41-NEXT: retq +; +; AVX-LABEL: testv2i64u: +; AVX: # BB#0: +; AVX-NEXT: vpextrq $1, %xmm0, %rax +; AVX-NEXT: bsrq %rax, %rax +; AVX-NEXT: xorq $63, %rax +; AVX-NEXT: vmovq %rax, %xmm1 +; AVX-NEXT: vmovq %xmm0, %rax +; AVX-NEXT: bsrq %rax, %rax +; AVX-NEXT: xorq $63, %rax +; AVX-NEXT: vmovq %rax, %xmm0 +; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX-NEXT: retq + %out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %in, i1 -1) + ret <2 x i64> %out +} + +define <4 x i32> @testv4i32(<4 x i32> %in) { +; SSE2-LABEL: testv4i32: +; SSE2: # BB#0: +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3] +; SSE2-NEXT: movd %xmm1, %eax +; SSE2-NEXT: bsrl %eax, %eax +; SSE2-NEXT: movl $63, %ecx +; SSE2-NEXT: cmovel %ecx, %eax +; SSE2-NEXT: xorl $31, %eax +; SSE2-NEXT: movd %eax, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3] +; SSE2-NEXT: movd %xmm2, %eax +; SSE2-NEXT: bsrl %eax, %eax +; SSE2-NEXT: cmovel %ecx, %eax +; SSE2-NEXT: xorl $31, %eax +; SSE2-NEXT: movd %eax, %xmm2 +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: bsrl %eax, %eax +; SSE2-NEXT: cmovel %ecx, %eax +; SSE2-NEXT: xorl $31, %eax +; SSE2-NEXT: movd %eax, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: bsrl %eax, %eax +; SSE2-NEXT: cmovel %ecx, %eax +; SSE2-NEXT: xorl $31, %eax +; SSE2-NEXT: movd %eax, %xmm0 +; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSE3-LABEL: testv4i32: +; SSE3: # BB#0: +; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3] +; SSE3-NEXT: movd %xmm1, %eax +; SSE3-NEXT: bsrl %eax, %eax +; SSE3-NEXT: movl $63, %ecx +; SSE3-NEXT: cmovel %ecx, %eax +; SSE3-NEXT: xorl $31, %eax +; SSE3-NEXT: movd %eax, 
%xmm1 +; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3] +; SSE3-NEXT: movd %xmm2, %eax +; SSE3-NEXT: bsrl %eax, %eax +; SSE3-NEXT: cmovel %ecx, %eax +; SSE3-NEXT: xorl $31, %eax +; SSE3-NEXT: movd %eax, %xmm2 +; SSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSE3-NEXT: movd %xmm0, %eax +; SSE3-NEXT: bsrl %eax, %eax +; SSE3-NEXT: cmovel %ecx, %eax +; SSE3-NEXT: xorl $31, %eax +; SSE3-NEXT: movd %eax, %xmm1 +; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSE3-NEXT: movd %xmm0, %eax +; SSE3-NEXT: bsrl %eax, %eax +; SSE3-NEXT: cmovel %ecx, %eax +; SSE3-NEXT: xorl $31, %eax +; SSE3-NEXT: movd %eax, %xmm0 +; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSE3-NEXT: movdqa %xmm1, %xmm0 +; SSE3-NEXT: retq +; +; SSSE3-LABEL: testv4i32: +; SSSE3: # BB#0: +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3] +; SSSE3-NEXT: movd %xmm1, %eax +; SSSE3-NEXT: bsrl %eax, %eax +; SSSE3-NEXT: movl $63, %ecx +; SSSE3-NEXT: cmovel %ecx, %eax +; SSSE3-NEXT: xorl $31, %eax +; SSSE3-NEXT: movd %eax, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3] +; SSSE3-NEXT: movd %xmm2, %eax +; SSSE3-NEXT: bsrl %eax, %eax +; SSSE3-NEXT: cmovel %ecx, %eax +; SSSE3-NEXT: xorl $31, %eax +; SSSE3-NEXT: movd %eax, %xmm2 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSSE3-NEXT: movd %xmm0, %eax +; SSSE3-NEXT: bsrl %eax, %eax +; SSSE3-NEXT: cmovel %ecx, %eax +; SSSE3-NEXT: xorl $31, %eax +; SSSE3-NEXT: movd %eax, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSSE3-NEXT: movd %xmm0, %eax +; SSSE3-NEXT: bsrl %eax, %eax +; SSSE3-NEXT: cmovel %ecx, %eax +; SSSE3-NEXT: xorl $31, %eax +; SSSE3-NEXT: movd %eax, %xmm0 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSSE3-NEXT: retq +; +; SSE41-LABEL: testv4i32: +; SSE41: # BB#0: +; SSE41-NEXT: pextrd $1, %xmm0, %eax +; SSE41-NEXT: bsrl %eax, %eax +; SSE41-NEXT: movl $63, %ecx +; SSE41-NEXT: cmovel %ecx, %eax +; SSE41-NEXT: xorl $31, %eax +; SSE41-NEXT: movd %xmm0, %edx +; SSE41-NEXT: bsrl %edx, %edx +; SSE41-NEXT: cmovel %ecx, %edx +; SSE41-NEXT: xorl $31, %edx +; SSE41-NEXT: movd %edx, %xmm1 +; SSE41-NEXT: pinsrd $1, %eax, %xmm1 +; SSE41-NEXT: pextrd $2, %xmm0, %eax +; SSE41-NEXT: bsrl %eax, %eax +; SSE41-NEXT: cmovel %ecx, %eax +; SSE41-NEXT: xorl $31, %eax +; SSE41-NEXT: pinsrd $2, %eax, %xmm1 +; SSE41-NEXT: pextrd $3, %xmm0, %eax +; SSE41-NEXT: bsrl %eax, %eax +; SSE41-NEXT: cmovel %ecx, %eax +; SSE41-NEXT: xorl $31, %eax +; SSE41-NEXT: pinsrd $3, %eax, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: retq +; +; AVX-LABEL: testv4i32: +; AVX: # BB#0: +; AVX-NEXT: vpextrd $1, %xmm0, %eax +; AVX-NEXT: bsrl %eax, %eax +; AVX-NEXT: movl $63, %ecx +; AVX-NEXT: cmovel %ecx, %eax +; AVX-NEXT: xorl $31, %eax +; AVX-NEXT: vmovd %xmm0, %edx +; AVX-NEXT: bsrl %edx, %edx +; AVX-NEXT: cmovel %ecx, %edx +; AVX-NEXT: xorl $31, %edx +; AVX-NEXT: vmovd %edx, %xmm1 +; AVX-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrd $2, %xmm0, %eax +; AVX-NEXT: bsrl %eax, %eax +; AVX-NEXT: cmovel %ecx, %eax +; AVX-NEXT: xorl $31, %eax +; AVX-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrd $3, %xmm0, %eax +; AVX-NEXT: bsrl %eax, %eax +; AVX-NEXT: cmovel %ecx, %eax +; AVX-NEXT: xorl $31, %eax +; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0 +; AVX-NEXT: retq + %out = call <4 x i32> 
@llvm.ctlz.v4i32(<4 x i32> %in, i1 0) + ret <4 x i32> %out +} + +define <4 x i32> @testv4i32u(<4 x i32> %in) { +; SSE2-LABEL: testv4i32u: +; SSE2: # BB#0: +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3] +; SSE2-NEXT: movd %xmm1, %eax +; SSE2-NEXT: bsrl %eax, %eax +; SSE2-NEXT: xorl $31, %eax +; SSE2-NEXT: movd %eax, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3] +; SSE2-NEXT: movd %xmm2, %eax +; SSE2-NEXT: bsrl %eax, %eax +; SSE2-NEXT: xorl $31, %eax +; SSE2-NEXT: movd %eax, %xmm2 +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: bsrl %eax, %eax +; SSE2-NEXT: xorl $31, %eax +; SSE2-NEXT: movd %eax, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: bsrl %eax, %eax +; SSE2-NEXT: xorl $31, %eax +; SSE2-NEXT: movd %eax, %xmm0 +; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSE3-LABEL: testv4i32u: +; SSE3: # BB#0: +; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3] +; SSE3-NEXT: movd %xmm1, %eax +; SSE3-NEXT: bsrl %eax, %eax +; SSE3-NEXT: xorl $31, %eax +; SSE3-NEXT: movd %eax, %xmm1 +; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3] +; SSE3-NEXT: movd %xmm2, %eax +; SSE3-NEXT: bsrl %eax, %eax +; SSE3-NEXT: xorl $31, %eax +; SSE3-NEXT: movd %eax, %xmm2 +; SSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSE3-NEXT: movd %xmm0, %eax +; SSE3-NEXT: bsrl %eax, %eax +; SSE3-NEXT: xorl $31, %eax +; SSE3-NEXT: movd %eax, %xmm1 +; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSE3-NEXT: movd %xmm0, %eax +; SSE3-NEXT: bsrl %eax, %eax +; SSE3-NEXT: xorl $31, %eax +; SSE3-NEXT: movd %eax, %xmm0 +; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSE3-NEXT: movdqa %xmm1, %xmm0 +; SSE3-NEXT: retq +; +; SSSE3-LABEL: testv4i32u: +; SSSE3: # BB#0: +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3] +; SSSE3-NEXT: movd %xmm1, %eax +; SSSE3-NEXT: bsrl %eax, %eax +; SSSE3-NEXT: xorl $31, %eax +; SSSE3-NEXT: movd %eax, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3] +; SSSE3-NEXT: movd %xmm2, %eax +; SSSE3-NEXT: bsrl %eax, %eax +; SSSE3-NEXT: xorl $31, %eax +; SSSE3-NEXT: movd %eax, %xmm2 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSSE3-NEXT: movd %xmm0, %eax +; SSSE3-NEXT: bsrl %eax, %eax +; SSSE3-NEXT: xorl $31, %eax +; SSSE3-NEXT: movd %eax, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSSE3-NEXT: movd %xmm0, %eax +; SSSE3-NEXT: bsrl %eax, %eax +; SSSE3-NEXT: xorl $31, %eax +; SSSE3-NEXT: movd %eax, %xmm0 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSSE3-NEXT: retq +; +; SSE41-LABEL: testv4i32u: +; SSE41: # BB#0: +; SSE41-NEXT: pextrd $1, %xmm0, %eax +; SSE41-NEXT: bsrl %eax, %eax +; SSE41-NEXT: xorl $31, %eax +; SSE41-NEXT: movd %xmm0, %ecx +; SSE41-NEXT: bsrl %ecx, %ecx +; SSE41-NEXT: xorl $31, %ecx +; SSE41-NEXT: movd %ecx, %xmm1 +; SSE41-NEXT: pinsrd $1, %eax, %xmm1 +; SSE41-NEXT: pextrd $2, %xmm0, %eax +; SSE41-NEXT: bsrl %eax, %eax +; SSE41-NEXT: xorl $31, %eax +; SSE41-NEXT: pinsrd $2, %eax, %xmm1 +; SSE41-NEXT: pextrd $3, %xmm0, %eax +; SSE41-NEXT: bsrl %eax, %eax +; SSE41-NEXT: xorl $31, %eax +; SSE41-NEXT: 
pinsrd $3, %eax, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: retq +; +; AVX-LABEL: testv4i32u: +; AVX: # BB#0: +; AVX-NEXT: vpextrd $1, %xmm0, %eax +; AVX-NEXT: bsrl %eax, %eax +; AVX-NEXT: xorl $31, %eax +; AVX-NEXT: vmovd %xmm0, %ecx +; AVX-NEXT: bsrl %ecx, %ecx +; AVX-NEXT: xorl $31, %ecx +; AVX-NEXT: vmovd %ecx, %xmm1 +; AVX-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrd $2, %xmm0, %eax +; AVX-NEXT: bsrl %eax, %eax +; AVX-NEXT: xorl $31, %eax +; AVX-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrd $3, %xmm0, %eax +; AVX-NEXT: bsrl %eax, %eax +; AVX-NEXT: xorl $31, %eax +; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0 +; AVX-NEXT: retq + %out = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %in, i1 -1) + ret <4 x i32> %out +} + +define <8 x i16> @testv8i16(<8 x i16> %in) { +; SSE2-LABEL: testv8i16: +; SSE2: # BB#0: +; SSE2-NEXT: pextrw $7, %xmm0, %eax +; SSE2-NEXT: bsrw %ax, %cx +; SSE2-NEXT: movw $31, %ax +; SSE2-NEXT: cmovew %ax, %cx +; SSE2-NEXT: xorl $15, %ecx +; SSE2-NEXT: movd %ecx, %xmm1 +; SSE2-NEXT: pextrw $3, %xmm0, %ecx +; SSE2-NEXT: bsrw %cx, %cx +; SSE2-NEXT: cmovew %ax, %cx +; SSE2-NEXT: xorl $15, %ecx +; SSE2-NEXT: movd %ecx, %xmm2 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE2-NEXT: pextrw $5, %xmm0, %ecx +; SSE2-NEXT: bsrw %cx, %cx +; SSE2-NEXT: cmovew %ax, %cx +; SSE2-NEXT: xorl $15, %ecx +; SSE2-NEXT: movd %ecx, %xmm3 +; SSE2-NEXT: pextrw $1, %xmm0, %ecx +; SSE2-NEXT: bsrw %cx, %cx +; SSE2-NEXT: cmovew %ax, %cx +; SSE2-NEXT: xorl $15, %ecx +; SSE2-NEXT: movd %ecx, %xmm1 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] +; SSE2-NEXT: pextrw $6, %xmm0, %ecx +; SSE2-NEXT: bsrw %cx, %cx +; SSE2-NEXT: cmovew %ax, %cx +; SSE2-NEXT: xorl $15, %ecx +; SSE2-NEXT: movd %ecx, %xmm2 +; SSE2-NEXT: pextrw $2, %xmm0, %ecx +; SSE2-NEXT: bsrw %cx, %cx +; SSE2-NEXT: cmovew %ax, %cx +; SSE2-NEXT: xorl $15, %ecx +; SSE2-NEXT: movd %ecx, %xmm3 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] +; SSE2-NEXT: pextrw $4, %xmm0, %ecx +; SSE2-NEXT: bsrw %cx, %cx +; SSE2-NEXT: cmovew %ax, %cx +; SSE2-NEXT: xorl $15, %ecx +; SSE2-NEXT: movd %ecx, %xmm2 +; SSE2-NEXT: movd %xmm0, %ecx +; SSE2-NEXT: bsrw %cx, %cx +; SSE2-NEXT: cmovew %ax, %cx +; SSE2-NEXT: xorl $15, %ecx +; SSE2-NEXT: movd %ecx, %xmm0 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-NEXT: retq +; +; SSE3-LABEL: testv8i16: +; SSE3: # BB#0: +; SSE3-NEXT: pextrw $7, %xmm0, %eax +; SSE3-NEXT: bsrw %ax, %cx +; SSE3-NEXT: movw $31, %ax +; SSE3-NEXT: cmovew %ax, %cx +; SSE3-NEXT: xorl $15, %ecx +; SSE3-NEXT: movd %ecx, %xmm1 +; SSE3-NEXT: pextrw $3, %xmm0, %ecx +; SSE3-NEXT: bsrw %cx, %cx +; SSE3-NEXT: cmovew %ax, %cx +; SSE3-NEXT: xorl $15, %ecx +; SSE3-NEXT: movd %ecx, %xmm2 +; SSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE3-NEXT: pextrw $5, %xmm0, %ecx +; SSE3-NEXT: bsrw %cx, %cx +; SSE3-NEXT: cmovew %ax, %cx +; SSE3-NEXT: xorl $15, %ecx +; SSE3-NEXT: movd %ecx, %xmm3 +; SSE3-NEXT: pextrw $1, %xmm0, %ecx +; SSE3-NEXT: 
bsrw %cx, %cx +; SSE3-NEXT: cmovew %ax, %cx +; SSE3-NEXT: xorl $15, %ecx +; SSE3-NEXT: movd %ecx, %xmm1 +; SSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] +; SSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] +; SSE3-NEXT: pextrw $6, %xmm0, %ecx +; SSE3-NEXT: bsrw %cx, %cx +; SSE3-NEXT: cmovew %ax, %cx +; SSE3-NEXT: xorl $15, %ecx +; SSE3-NEXT: movd %ecx, %xmm2 +; SSE3-NEXT: pextrw $2, %xmm0, %ecx +; SSE3-NEXT: bsrw %cx, %cx +; SSE3-NEXT: cmovew %ax, %cx +; SSE3-NEXT: xorl $15, %ecx +; SSE3-NEXT: movd %ecx, %xmm3 +; SSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] +; SSE3-NEXT: pextrw $4, %xmm0, %ecx +; SSE3-NEXT: bsrw %cx, %cx +; SSE3-NEXT: cmovew %ax, %cx +; SSE3-NEXT: xorl $15, %ecx +; SSE3-NEXT: movd %ecx, %xmm2 +; SSE3-NEXT: movd %xmm0, %ecx +; SSE3-NEXT: bsrw %cx, %cx +; SSE3-NEXT: cmovew %ax, %cx +; SSE3-NEXT: xorl $15, %ecx +; SSE3-NEXT: movd %ecx, %xmm0 +; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] +; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE3-NEXT: retq +; +; SSSE3-LABEL: testv8i16: +; SSSE3: # BB#0: +; SSSE3-NEXT: pextrw $7, %xmm0, %eax +; SSSE3-NEXT: bsrw %ax, %cx +; SSSE3-NEXT: movw $31, %ax +; SSSE3-NEXT: cmovew %ax, %cx +; SSSE3-NEXT: xorl $15, %ecx +; SSSE3-NEXT: movd %ecx, %xmm1 +; SSSE3-NEXT: pextrw $3, %xmm0, %ecx +; SSSE3-NEXT: bsrw %cx, %cx +; SSSE3-NEXT: cmovew %ax, %cx +; SSSE3-NEXT: xorl $15, %ecx +; SSSE3-NEXT: movd %ecx, %xmm2 +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSSE3-NEXT: pextrw $5, %xmm0, %ecx +; SSSE3-NEXT: bsrw %cx, %cx +; SSSE3-NEXT: cmovew %ax, %cx +; SSSE3-NEXT: xorl $15, %ecx +; SSSE3-NEXT: movd %ecx, %xmm3 +; SSSE3-NEXT: pextrw $1, %xmm0, %ecx +; SSSE3-NEXT: bsrw %cx, %cx +; SSSE3-NEXT: cmovew %ax, %cx +; SSSE3-NEXT: xorl $15, %ecx +; SSSE3-NEXT: movd %ecx, %xmm1 +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] +; SSSE3-NEXT: pextrw $6, %xmm0, %ecx +; SSSE3-NEXT: bsrw %cx, %cx +; SSSE3-NEXT: cmovew %ax, %cx +; SSSE3-NEXT: xorl $15, %ecx +; SSSE3-NEXT: movd %ecx, %xmm2 +; SSSE3-NEXT: pextrw $2, %xmm0, %ecx +; SSSE3-NEXT: bsrw %cx, %cx +; SSSE3-NEXT: cmovew %ax, %cx +; SSSE3-NEXT: xorl $15, %ecx +; SSSE3-NEXT: movd %ecx, %xmm3 +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] +; SSSE3-NEXT: pextrw $4, %xmm0, %ecx +; SSSE3-NEXT: bsrw %cx, %cx +; SSSE3-NEXT: cmovew %ax, %cx +; SSSE3-NEXT: xorl $15, %ecx +; SSSE3-NEXT: movd %ecx, %xmm2 +; SSSE3-NEXT: movd %xmm0, %ecx +; SSSE3-NEXT: bsrw %cx, %cx +; SSSE3-NEXT: cmovew %ax, %cx +; SSSE3-NEXT: xorl $15, %ecx +; SSSE3-NEXT: movd %ecx, %xmm0 +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSSE3-NEXT: retq +; +; SSE41-LABEL: testv8i16: +; SSE41: # BB#0: +; SSE41-NEXT: pextrw $1, %xmm0, %eax 
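+; NOTE: Each scalar expansion in these tests follows the same pattern: BSR
+; returns the index of the highest set bit (leaving the destination undefined
+; and setting ZF on a zero input), CMOV substitutes a sentinel for the zero
+; case, and the final XOR with (bitwidth-1) converts the bit index into a
+; leading-zero count, e.g. movw $31 + xorl $15 yields 31^15 = 16 for a zero
+; i16 element.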
+; SSE41-NEXT: bsrw %ax, %cx +; SSE41-NEXT: movw $31, %ax +; SSE41-NEXT: cmovew %ax, %cx +; SSE41-NEXT: xorl $15, %ecx +; SSE41-NEXT: movd %xmm0, %edx +; SSE41-NEXT: bsrw %dx, %dx +; SSE41-NEXT: cmovew %ax, %dx +; SSE41-NEXT: xorl $15, %edx +; SSE41-NEXT: movd %edx, %xmm1 +; SSE41-NEXT: pinsrw $1, %ecx, %xmm1 +; SSE41-NEXT: pextrw $2, %xmm0, %ecx +; SSE41-NEXT: bsrw %cx, %cx +; SSE41-NEXT: cmovew %ax, %cx +; SSE41-NEXT: xorl $15, %ecx +; SSE41-NEXT: pinsrw $2, %ecx, %xmm1 +; SSE41-NEXT: pextrw $3, %xmm0, %ecx +; SSE41-NEXT: bsrw %cx, %cx +; SSE41-NEXT: cmovew %ax, %cx +; SSE41-NEXT: xorl $15, %ecx +; SSE41-NEXT: pinsrw $3, %ecx, %xmm1 +; SSE41-NEXT: pextrw $4, %xmm0, %ecx +; SSE41-NEXT: bsrw %cx, %cx +; SSE41-NEXT: cmovew %ax, %cx +; SSE41-NEXT: xorl $15, %ecx +; SSE41-NEXT: pinsrw $4, %ecx, %xmm1 +; SSE41-NEXT: pextrw $5, %xmm0, %ecx +; SSE41-NEXT: bsrw %cx, %cx +; SSE41-NEXT: cmovew %ax, %cx +; SSE41-NEXT: xorl $15, %ecx +; SSE41-NEXT: pinsrw $5, %ecx, %xmm1 +; SSE41-NEXT: pextrw $6, %xmm0, %ecx +; SSE41-NEXT: bsrw %cx, %cx +; SSE41-NEXT: cmovew %ax, %cx +; SSE41-NEXT: xorl $15, %ecx +; SSE41-NEXT: pinsrw $6, %ecx, %xmm1 +; SSE41-NEXT: pextrw $7, %xmm0, %ecx +; SSE41-NEXT: bsrw %cx, %cx +; SSE41-NEXT: cmovew %ax, %cx +; SSE41-NEXT: xorl $15, %ecx +; SSE41-NEXT: pinsrw $7, %ecx, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: retq +; +; AVX-LABEL: testv8i16: +; AVX: # BB#0: +; AVX-NEXT: vpextrw $1, %xmm0, %eax +; AVX-NEXT: bsrw %ax, %cx +; AVX-NEXT: movw $31, %ax +; AVX-NEXT: cmovew %ax, %cx +; AVX-NEXT: xorl $15, %ecx +; AVX-NEXT: vmovd %xmm0, %edx +; AVX-NEXT: bsrw %dx, %dx +; AVX-NEXT: cmovew %ax, %dx +; AVX-NEXT: xorl $15, %edx +; AVX-NEXT: vmovd %edx, %xmm1 +; AVX-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1 +; AVX-NEXT: vpextrw $2, %xmm0, %ecx +; AVX-NEXT: bsrw %cx, %cx +; AVX-NEXT: cmovew %ax, %cx +; AVX-NEXT: xorl $15, %ecx +; AVX-NEXT: vpinsrw $2, %ecx, %xmm1, %xmm1 +; AVX-NEXT: vpextrw $3, %xmm0, %ecx +; AVX-NEXT: bsrw %cx, %cx +; AVX-NEXT: cmovew %ax, %cx +; AVX-NEXT: xorl $15, %ecx +; AVX-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1 +; AVX-NEXT: vpextrw $4, %xmm0, %ecx +; AVX-NEXT: bsrw %cx, %cx +; AVX-NEXT: cmovew %ax, %cx +; AVX-NEXT: xorl $15, %ecx +; AVX-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1 +; AVX-NEXT: vpextrw $5, %xmm0, %ecx +; AVX-NEXT: bsrw %cx, %cx +; AVX-NEXT: cmovew %ax, %cx +; AVX-NEXT: xorl $15, %ecx +; AVX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1 +; AVX-NEXT: vpextrw $6, %xmm0, %ecx +; AVX-NEXT: bsrw %cx, %cx +; AVX-NEXT: cmovew %ax, %cx +; AVX-NEXT: xorl $15, %ecx +; AVX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1 +; AVX-NEXT: vpextrw $7, %xmm0, %ecx +; AVX-NEXT: bsrw %cx, %cx +; AVX-NEXT: cmovew %ax, %cx +; AVX-NEXT: xorl $15, %ecx +; AVX-NEXT: vpinsrw $7, %ecx, %xmm1, %xmm0 +; AVX-NEXT: retq + %out = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %in, i1 0) + ret <8 x i16> %out +} + +define <8 x i16> @testv8i16u(<8 x i16> %in) { +; SSE2-LABEL: testv8i16u: +; SSE2: # BB#0: +; SSE2-NEXT: pextrw $7, %xmm0, %eax +; SSE2-NEXT: bsrw %ax, %ax +; SSE2-NEXT: xorl $15, %eax +; SSE2-NEXT: movd %eax, %xmm1 +; SSE2-NEXT: pextrw $3, %xmm0, %eax +; SSE2-NEXT: bsrw %ax, %ax +; SSE2-NEXT: xorl $15, %eax +; SSE2-NEXT: movd %eax, %xmm2 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE2-NEXT: pextrw $5, %xmm0, %eax +; SSE2-NEXT: bsrw %ax, %ax +; SSE2-NEXT: xorl $15, %eax +; SSE2-NEXT: movd %eax, %xmm3 +; SSE2-NEXT: pextrw $1, %xmm0, %eax +; SSE2-NEXT: bsrw %ax, %ax +; SSE2-NEXT: xorl $15, %eax +; SSE2-NEXT: movd %eax, %xmm1 +; SSE2-NEXT: 
punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] +; SSE2-NEXT: pextrw $6, %xmm0, %eax +; SSE2-NEXT: bsrw %ax, %ax +; SSE2-NEXT: xorl $15, %eax +; SSE2-NEXT: movd %eax, %xmm2 +; SSE2-NEXT: pextrw $2, %xmm0, %eax +; SSE2-NEXT: bsrw %ax, %ax +; SSE2-NEXT: xorl $15, %eax +; SSE2-NEXT: movd %eax, %xmm3 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] +; SSE2-NEXT: pextrw $4, %xmm0, %eax +; SSE2-NEXT: bsrw %ax, %ax +; SSE2-NEXT: xorl $15, %eax +; SSE2-NEXT: movd %eax, %xmm2 +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: bsrw %ax, %ax +; SSE2-NEXT: xorl $15, %eax +; SSE2-NEXT: movd %eax, %xmm0 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-NEXT: retq +; +; SSE3-LABEL: testv8i16u: +; SSE3: # BB#0: +; SSE3-NEXT: pextrw $7, %xmm0, %eax +; SSE3-NEXT: bsrw %ax, %ax +; SSE3-NEXT: xorl $15, %eax +; SSE3-NEXT: movd %eax, %xmm1 +; SSE3-NEXT: pextrw $3, %xmm0, %eax +; SSE3-NEXT: bsrw %ax, %ax +; SSE3-NEXT: xorl $15, %eax +; SSE3-NEXT: movd %eax, %xmm2 +; SSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE3-NEXT: pextrw $5, %xmm0, %eax +; SSE3-NEXT: bsrw %ax, %ax +; SSE3-NEXT: xorl $15, %eax +; SSE3-NEXT: movd %eax, %xmm3 +; SSE3-NEXT: pextrw $1, %xmm0, %eax +; SSE3-NEXT: bsrw %ax, %ax +; SSE3-NEXT: xorl $15, %eax +; SSE3-NEXT: movd %eax, %xmm1 +; SSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] +; SSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] +; SSE3-NEXT: pextrw $6, %xmm0, %eax +; SSE3-NEXT: bsrw %ax, %ax +; SSE3-NEXT: xorl $15, %eax +; SSE3-NEXT: movd %eax, %xmm2 +; SSE3-NEXT: pextrw $2, %xmm0, %eax +; SSE3-NEXT: bsrw %ax, %ax +; SSE3-NEXT: xorl $15, %eax +; SSE3-NEXT: movd %eax, %xmm3 +; SSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] +; SSE3-NEXT: pextrw $4, %xmm0, %eax +; SSE3-NEXT: bsrw %ax, %ax +; SSE3-NEXT: xorl $15, %eax +; SSE3-NEXT: movd %eax, %xmm2 +; SSE3-NEXT: movd %xmm0, %eax +; SSE3-NEXT: bsrw %ax, %ax +; SSE3-NEXT: xorl $15, %eax +; SSE3-NEXT: movd %eax, %xmm0 +; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] +; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE3-NEXT: retq +; +; SSSE3-LABEL: testv8i16u: +; SSSE3: # BB#0: +; SSSE3-NEXT: pextrw $7, %xmm0, %eax +; SSSE3-NEXT: bsrw %ax, %ax +; SSSE3-NEXT: xorl $15, %eax +; SSSE3-NEXT: movd %eax, %xmm1 +; SSSE3-NEXT: pextrw $3, %xmm0, %eax +; SSSE3-NEXT: bsrw %ax, %ax +; SSSE3-NEXT: xorl $15, %eax +; SSSE3-NEXT: movd %eax, %xmm2 +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSSE3-NEXT: pextrw $5, %xmm0, %eax +; SSSE3-NEXT: bsrw %ax, %ax +; SSSE3-NEXT: xorl $15, %eax +; SSSE3-NEXT: movd %eax, %xmm3 +; SSSE3-NEXT: pextrw $1, %xmm0, %eax +; SSSE3-NEXT: bsrw %ax, %ax +; SSSE3-NEXT: xorl $15, %eax +; 
SSSE3-NEXT: movd %eax, %xmm1 +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] +; SSSE3-NEXT: pextrw $6, %xmm0, %eax +; SSSE3-NEXT: bsrw %ax, %ax +; SSSE3-NEXT: xorl $15, %eax +; SSSE3-NEXT: movd %eax, %xmm2 +; SSSE3-NEXT: pextrw $2, %xmm0, %eax +; SSSE3-NEXT: bsrw %ax, %ax +; SSSE3-NEXT: xorl $15, %eax +; SSSE3-NEXT: movd %eax, %xmm3 +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] +; SSSE3-NEXT: pextrw $4, %xmm0, %eax +; SSSE3-NEXT: bsrw %ax, %ax +; SSSE3-NEXT: xorl $15, %eax +; SSSE3-NEXT: movd %eax, %xmm2 +; SSSE3-NEXT: movd %xmm0, %eax +; SSSE3-NEXT: bsrw %ax, %ax +; SSSE3-NEXT: xorl $15, %eax +; SSSE3-NEXT: movd %eax, %xmm0 +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSSE3-NEXT: retq +; +; SSE41-LABEL: testv8i16u: +; SSE41: # BB#0: +; SSE41-NEXT: pextrw $1, %xmm0, %eax +; SSE41-NEXT: bsrw %ax, %ax +; SSE41-NEXT: xorl $15, %eax +; SSE41-NEXT: movd %xmm0, %ecx +; SSE41-NEXT: bsrw %cx, %cx +; SSE41-NEXT: xorl $15, %ecx +; SSE41-NEXT: movd %ecx, %xmm1 +; SSE41-NEXT: pinsrw $1, %eax, %xmm1 +; SSE41-NEXT: pextrw $2, %xmm0, %eax +; SSE41-NEXT: bsrw %ax, %ax +; SSE41-NEXT: xorl $15, %eax +; SSE41-NEXT: pinsrw $2, %eax, %xmm1 +; SSE41-NEXT: pextrw $3, %xmm0, %eax +; SSE41-NEXT: bsrw %ax, %ax +; SSE41-NEXT: xorl $15, %eax +; SSE41-NEXT: pinsrw $3, %eax, %xmm1 +; SSE41-NEXT: pextrw $4, %xmm0, %eax +; SSE41-NEXT: bsrw %ax, %ax +; SSE41-NEXT: xorl $15, %eax +; SSE41-NEXT: pinsrw $4, %eax, %xmm1 +; SSE41-NEXT: pextrw $5, %xmm0, %eax +; SSE41-NEXT: bsrw %ax, %ax +; SSE41-NEXT: xorl $15, %eax +; SSE41-NEXT: pinsrw $5, %eax, %xmm1 +; SSE41-NEXT: pextrw $6, %xmm0, %eax +; SSE41-NEXT: bsrw %ax, %ax +; SSE41-NEXT: xorl $15, %eax +; SSE41-NEXT: pinsrw $6, %eax, %xmm1 +; SSE41-NEXT: pextrw $7, %xmm0, %eax +; SSE41-NEXT: bsrw %ax, %ax +; SSE41-NEXT: xorl $15, %eax +; SSE41-NEXT: pinsrw $7, %eax, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: retq +; +; AVX-LABEL: testv8i16u: +; AVX: # BB#0: +; AVX-NEXT: vpextrw $1, %xmm0, %eax +; AVX-NEXT: bsrw %ax, %ax +; AVX-NEXT: xorl $15, %eax +; AVX-NEXT: vmovd %xmm0, %ecx +; AVX-NEXT: bsrw %cx, %cx +; AVX-NEXT: xorl $15, %ecx +; AVX-NEXT: vmovd %ecx, %xmm1 +; AVX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrw $2, %xmm0, %eax +; AVX-NEXT: bsrw %ax, %ax +; AVX-NEXT: xorl $15, %eax +; AVX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrw $3, %xmm0, %eax +; AVX-NEXT: bsrw %ax, %ax +; AVX-NEXT: xorl $15, %eax +; AVX-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrw $4, %xmm0, %eax +; AVX-NEXT: bsrw %ax, %ax +; AVX-NEXT: xorl $15, %eax +; AVX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrw $5, %xmm0, %eax +; AVX-NEXT: bsrw %ax, %ax +; AVX-NEXT: xorl $15, %eax +; AVX-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrw $6, %xmm0, %eax +; AVX-NEXT: bsrw %ax, %ax +; AVX-NEXT: xorl $15, %eax +; AVX-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrw $7, %xmm0, %eax +; AVX-NEXT: bsrw %ax, %ax +; AVX-NEXT: xorl $15, %eax +; AVX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0 +; AVX-NEXT: retq + %out = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %in, i1 
-1) + ret <8 x i16> %out +} + +define <16 x i8> @testv16i8(<16 x i8> %in) { +; SSE2-LABEL: testv16i8: +; SSE2: # BB#0: +; SSE2: pushq %rbp +; SSE2: movaps %xmm0, -24(%rsp) +; SSE2-NEXT: movzbl -9(%rsp), %eax +; SSE2-NEXT: bsrl %eax, %ecx +; SSE2-NEXT: movl $15, %eax +; SSE2-NEXT: cmovel %eax, %ecx +; SSE2-NEXT: xorl $7, %ecx +; SSE2-NEXT: movd %ecx, %xmm0 +; SSE2-NEXT: movzbl -10(%rsp), %ebx +; SSE2-NEXT: movzbl -11(%rsp), %edi +; SSE2-NEXT: movzbl -12(%rsp), %r9d +; SSE2-NEXT: movzbl -13(%rsp), %edx +; SSE2-NEXT: movzbl -14(%rsp), %r11d +; SSE2-NEXT: movzbl -15(%rsp), %esi +; SSE2-NEXT: movzbl -16(%rsp), %r8d +; SSE2-NEXT: movzbl -17(%rsp), %ecx +; SSE2-NEXT: bsrl %ecx, %ecx +; SSE2-NEXT: cmovel %eax, %ecx +; SSE2-NEXT: xorl $7, %ecx +; SSE2-NEXT: movd %ecx, %xmm1 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-NEXT: bsrl %edx, %ecx +; SSE2-NEXT: cmovel %eax, %ecx +; SSE2-NEXT: xorl $7, %ecx +; SSE2-NEXT: movd %ecx, %xmm2 +; SSE2-NEXT: movzbl -18(%rsp), %edx +; SSE2-NEXT: movzbl -19(%rsp), %ecx +; SSE2-NEXT: movzbl -20(%rsp), %r10d +; SSE2-NEXT: movzbl -21(%rsp), %ebp +; SSE2-NEXT: bsrl %ebp, %ebp +; SSE2-NEXT: cmovel %eax, %ebp +; SSE2-NEXT: xorl $7, %ebp +; SSE2-NEXT: movd %ebp, %xmm0 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE2-NEXT: bsrl %edi, %edi +; SSE2-NEXT: cmovel %eax, %edi +; SSE2-NEXT: xorl $7, %edi +; SSE2-NEXT: movd %edi, %xmm1 +; SSE2-NEXT: bsrl %ecx, %ecx +; SSE2-NEXT: cmovel %eax, %ecx +; SSE2-NEXT: xorl $7, %ecx +; SSE2-NEXT: movd %ecx, %xmm2 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE2-NEXT: bsrl %esi, %ecx +; SSE2-NEXT: cmovel %eax, %ecx +; SSE2-NEXT: xorl $7, %ecx +; SSE2-NEXT: movd %ecx, %xmm3 +; SSE2-NEXT: movzbl -22(%rsp), %esi +; SSE2-NEXT: movzbl -23(%rsp), %ecx +; SSE2-NEXT: bsrl %ecx, %ecx +; SSE2-NEXT: cmovel %eax, %ecx +; SSE2-NEXT: xorl $7, %ecx +; SSE2-NEXT: movd %ecx, %xmm1 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-NEXT: bsrl %ebx, %ecx +; SSE2-NEXT: cmovel %eax, %ecx +; SSE2-NEXT: xorl $7, %ecx +; SSE2-NEXT: movd %ecx, %xmm0 +; SSE2-NEXT: bsrl %edx, %ecx +; SSE2-NEXT: cmovel %eax, %ecx +; SSE2-NEXT: xorl $7, %ecx +; SSE2-NEXT: movd %ecx, %xmm3 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSE2-NEXT: bsrl %r11d, %ecx +; SSE2-NEXT: cmovel %eax, %ecx +; SSE2-NEXT: xorl $7, %ecx +; SSE2-NEXT: movd %ecx, %xmm0 +; SSE2-NEXT: bsrl %esi, %ecx +; SSE2-NEXT: cmovel %eax, %ecx +; SSE2-NEXT: xorl $7, %ecx +; SSE2-NEXT: movd %ecx, %xmm2 
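+; NOTE: With no byte extract available before SSE4.1, the i8 vector is
+; spilled to the stack (the movaps above) and each lane is reloaded with
+; movzbl; the scalar bsrl/cmovel/xorl $7 sequence then computes the per-byte
+; leading-zero count (movl $15 + xorl $7 gives 15^7 = 8 for a zero byte)
+; before the results are repacked with punpcklbw.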
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] +; SSE2-NEXT: bsrl %r9d, %ecx +; SSE2-NEXT: cmovel %eax, %ecx +; SSE2-NEXT: xorl $7, %ecx +; SSE2-NEXT: movd %ecx, %xmm0 +; SSE2-NEXT: bsrl %r10d, %ecx +; SSE2-NEXT: cmovel %eax, %ecx +; SSE2-NEXT: xorl $7, %ecx +; SSE2-NEXT: movd %ecx, %xmm3 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSE2-NEXT: bsrl %r8d, %ecx +; SSE2-NEXT: cmovel %eax, %ecx +; SSE2-NEXT: xorl $7, %ecx +; SSE2-NEXT: movd %ecx, %xmm4 +; SSE2-NEXT: movzbl -24(%rsp), %ecx +; SSE2-NEXT: bsrl %ecx, %ecx +; SSE2-NEXT: cmovel %eax, %ecx +; SSE2-NEXT: xorl $7, %ecx +; SSE2-NEXT: movd %ecx, %xmm0 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE2-NEXT: popq %rbx +; SSE2-NEXT: popq %rbp +; SSE2-NEXT: retq +; +; SSE3-LABEL: testv16i8: +; SSE3: # BB#0: +; SSE3: pushq %rbp +; SSE3: movaps %xmm0, -24(%rsp) +; SSE3-NEXT: movzbl -9(%rsp), %eax +; SSE3-NEXT: bsrl %eax, %ecx +; SSE3-NEXT: movl $15, %eax +; SSE3-NEXT: cmovel %eax, %ecx +; SSE3-NEXT: xorl $7, %ecx +; SSE3-NEXT: movd %ecx, %xmm0 +; SSE3-NEXT: movzbl -10(%rsp), %ebx +; SSE3-NEXT: movzbl -11(%rsp), %edi +; SSE3-NEXT: movzbl -12(%rsp), %r9d +; SSE3-NEXT: movzbl -13(%rsp), %edx +; SSE3-NEXT: movzbl -14(%rsp), %r11d +; SSE3-NEXT: movzbl -15(%rsp), %esi +; SSE3-NEXT: movzbl -16(%rsp), %r8d +; SSE3-NEXT: movzbl -17(%rsp), %ecx +; SSE3-NEXT: bsrl %ecx, %ecx +; SSE3-NEXT: cmovel %eax, %ecx +; SSE3-NEXT: xorl $7, %ecx +; SSE3-NEXT: movd %ecx, %xmm1 +; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE3-NEXT: bsrl %edx, %ecx +; SSE3-NEXT: cmovel %eax, %ecx +; SSE3-NEXT: xorl $7, %ecx +; SSE3-NEXT: movd %ecx, %xmm2 +; SSE3-NEXT: movzbl -18(%rsp), %edx +; SSE3-NEXT: movzbl -19(%rsp), %ecx +; SSE3-NEXT: movzbl -20(%rsp), %r10d +; SSE3-NEXT: movzbl -21(%rsp), %ebp +; SSE3-NEXT: bsrl %ebp, %ebp +; SSE3-NEXT: cmovel %eax, %ebp +; SSE3-NEXT: xorl $7, %ebp +; SSE3-NEXT: movd %ebp, %xmm0 +; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE3-NEXT: bsrl %edi, %edi +; SSE3-NEXT: cmovel %eax, %edi +; SSE3-NEXT: xorl $7, %edi +; SSE3-NEXT: movd %edi, %xmm1 +; SSE3-NEXT: bsrl %ecx, %ecx +; SSE3-NEXT: cmovel %eax, %ecx +; 
SSE3-NEXT: xorl $7, %ecx +; SSE3-NEXT: movd %ecx, %xmm2 +; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE3-NEXT: bsrl %esi, %ecx +; SSE3-NEXT: cmovel %eax, %ecx +; SSE3-NEXT: xorl $7, %ecx +; SSE3-NEXT: movd %ecx, %xmm3 +; SSE3-NEXT: movzbl -22(%rsp), %esi +; SSE3-NEXT: movzbl -23(%rsp), %ecx +; SSE3-NEXT: bsrl %ecx, %ecx +; SSE3-NEXT: cmovel %eax, %ecx +; SSE3-NEXT: xorl $7, %ecx +; SSE3-NEXT: movd %ecx, %xmm1 +; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] +; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] +; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE3-NEXT: bsrl %ebx, %ecx +; SSE3-NEXT: cmovel %eax, %ecx +; SSE3-NEXT: xorl $7, %ecx +; SSE3-NEXT: movd %ecx, %xmm0 +; SSE3-NEXT: bsrl %edx, %ecx +; SSE3-NEXT: cmovel %eax, %ecx +; SSE3-NEXT: xorl $7, %ecx +; SSE3-NEXT: movd %ecx, %xmm3 +; SSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSE3-NEXT: bsrl %r11d, %ecx +; SSE3-NEXT: cmovel %eax, %ecx +; SSE3-NEXT: xorl $7, %ecx +; SSE3-NEXT: movd %ecx, %xmm0 +; SSE3-NEXT: bsrl %esi, %ecx +; SSE3-NEXT: cmovel %eax, %ecx +; SSE3-NEXT: xorl $7, %ecx +; SSE3-NEXT: movd %ecx, %xmm2 +; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] +; SSE3-NEXT: bsrl %r9d, %ecx +; SSE3-NEXT: cmovel %eax, %ecx +; SSE3-NEXT: xorl $7, %ecx +; SSE3-NEXT: movd %ecx, %xmm0 +; SSE3-NEXT: bsrl %r10d, %ecx +; SSE3-NEXT: cmovel %eax, %ecx +; SSE3-NEXT: xorl $7, %ecx +; SSE3-NEXT: movd %ecx, %xmm3 +; SSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSE3-NEXT: bsrl %r8d, %ecx +; SSE3-NEXT: cmovel %eax, %ecx +; SSE3-NEXT: xorl $7, %ecx +; SSE3-NEXT: movd %ecx, %xmm4 +; SSE3-NEXT: movzbl -24(%rsp), %ecx +; SSE3-NEXT: bsrl %ecx, %ecx +; SSE3-NEXT: cmovel %eax, %ecx +; SSE3-NEXT: xorl $7, %ecx +; SSE3-NEXT: movd %ecx, %xmm0 +; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] +; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] +; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE3-NEXT: popq %rbx +; SSE3-NEXT: popq %rbp +; SSE3-NEXT: retq +; +; SSSE3-LABEL: testv16i8: +; SSSE3: # BB#0: +; SSSE3: pushq %rbp +; SSSE3: movaps %xmm0, 
-24(%rsp) +; SSSE3-NEXT: movzbl -9(%rsp), %eax +; SSSE3-NEXT: bsrl %eax, %ecx +; SSSE3-NEXT: movl $15, %eax +; SSSE3-NEXT: cmovel %eax, %ecx +; SSSE3-NEXT: xorl $7, %ecx +; SSSE3-NEXT: movd %ecx, %xmm0 +; SSSE3-NEXT: movzbl -10(%rsp), %ebx +; SSSE3-NEXT: movzbl -11(%rsp), %edi +; SSSE3-NEXT: movzbl -12(%rsp), %r9d +; SSSE3-NEXT: movzbl -13(%rsp), %edx +; SSSE3-NEXT: movzbl -14(%rsp), %r11d +; SSSE3-NEXT: movzbl -15(%rsp), %esi +; SSSE3-NEXT: movzbl -16(%rsp), %r8d +; SSSE3-NEXT: movzbl -17(%rsp), %ecx +; SSSE3-NEXT: bsrl %ecx, %ecx +; SSSE3-NEXT: cmovel %eax, %ecx +; SSSE3-NEXT: xorl $7, %ecx +; SSSE3-NEXT: movd %ecx, %xmm1 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSSE3-NEXT: bsrl %edx, %ecx +; SSSE3-NEXT: cmovel %eax, %ecx +; SSSE3-NEXT: xorl $7, %ecx +; SSSE3-NEXT: movd %ecx, %xmm2 +; SSSE3-NEXT: movzbl -18(%rsp), %edx +; SSSE3-NEXT: movzbl -19(%rsp), %ecx +; SSSE3-NEXT: movzbl -20(%rsp), %r10d +; SSSE3-NEXT: movzbl -21(%rsp), %ebp +; SSSE3-NEXT: bsrl %ebp, %ebp +; SSSE3-NEXT: cmovel %eax, %ebp +; SSSE3-NEXT: xorl $7, %ebp +; SSSE3-NEXT: movd %ebp, %xmm0 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSSE3-NEXT: bsrl %edi, %edi +; SSSE3-NEXT: cmovel %eax, %edi +; SSSE3-NEXT: xorl $7, %edi +; SSSE3-NEXT: movd %edi, %xmm1 +; SSSE3-NEXT: bsrl %ecx, %ecx +; SSSE3-NEXT: cmovel %eax, %ecx +; SSSE3-NEXT: xorl $7, %ecx +; SSSE3-NEXT: movd %ecx, %xmm2 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSSE3-NEXT: bsrl %esi, %ecx +; SSSE3-NEXT: cmovel %eax, %ecx +; SSSE3-NEXT: xorl $7, %ecx +; SSSE3-NEXT: movd %ecx, %xmm3 +; SSSE3-NEXT: movzbl -22(%rsp), %esi +; SSSE3-NEXT: movzbl -23(%rsp), %ecx +; SSSE3-NEXT: bsrl %ecx, %ecx +; SSSE3-NEXT: cmovel %eax, %ecx +; SSSE3-NEXT: xorl $7, %ecx +; SSSE3-NEXT: movd %ecx, %xmm1 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSSE3-NEXT: bsrl %ebx, %ecx +; SSSE3-NEXT: cmovel %eax, %ecx +; SSSE3-NEXT: xorl $7, %ecx +; SSSE3-NEXT: movd %ecx, %xmm0 +; SSSE3-NEXT: bsrl %edx, %ecx +; SSSE3-NEXT: cmovel %eax, %ecx +; SSSE3-NEXT: xorl $7, %ecx +; SSSE3-NEXT: movd %ecx, %xmm3 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSSE3-NEXT: bsrl %r11d, %ecx +; SSSE3-NEXT: cmovel %eax, %ecx +; SSSE3-NEXT: xorl $7, %ecx +; SSSE3-NEXT: movd %ecx, %xmm0 +; SSSE3-NEXT: bsrl %esi, %ecx +; SSSE3-NEXT: cmovel %eax, %ecx +; SSSE3-NEXT: xorl $7, %ecx +; SSSE3-NEXT: movd %ecx, %xmm2 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = 
xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] +; SSSE3-NEXT: bsrl %r9d, %ecx +; SSSE3-NEXT: cmovel %eax, %ecx +; SSSE3-NEXT: xorl $7, %ecx +; SSSE3-NEXT: movd %ecx, %xmm0 +; SSSE3-NEXT: bsrl %r10d, %ecx +; SSSE3-NEXT: cmovel %eax, %ecx +; SSSE3-NEXT: xorl $7, %ecx +; SSSE3-NEXT: movd %ecx, %xmm3 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSSE3-NEXT: bsrl %r8d, %ecx +; SSSE3-NEXT: cmovel %eax, %ecx +; SSSE3-NEXT: xorl $7, %ecx +; SSSE3-NEXT: movd %ecx, %xmm4 +; SSSE3-NEXT: movzbl -24(%rsp), %ecx +; SSSE3-NEXT: bsrl %ecx, %ecx +; SSSE3-NEXT: cmovel %eax, %ecx +; SSSE3-NEXT: xorl $7, %ecx +; SSSE3-NEXT: movd %ecx, %xmm0 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSSE3-NEXT: popq %rbx +; SSSE3-NEXT: popq %rbp +; SSSE3-NEXT: retq +; +; SSE41-LABEL: testv16i8: +; SSE41: # BB#0: +; SSE41-NEXT: pextrb $1, %xmm0, %eax +; SSE41-NEXT: bsrl %eax, %ecx +; SSE41-NEXT: movl $15, %eax +; SSE41-NEXT: cmovel %eax, %ecx +; SSE41-NEXT: xorl $7, %ecx +; SSE41-NEXT: pextrb $0, %xmm0, %edx +; SSE41-NEXT: bsrl %edx, %edx +; SSE41-NEXT: cmovel %eax, %edx +; SSE41-NEXT: xorl $7, %edx +; SSE41-NEXT: movd %edx, %xmm1 +; SSE41-NEXT: pinsrb $1, %ecx, %xmm1 +; SSE41-NEXT: pextrb $2, %xmm0, %ecx +; SSE41-NEXT: bsrl %ecx, %ecx +; SSE41-NEXT: cmovel %eax, %ecx +; SSE41-NEXT: xorl $7, %ecx +; SSE41-NEXT: pinsrb $2, %ecx, %xmm1 +; SSE41-NEXT: pextrb $3, %xmm0, %ecx +; SSE41-NEXT: bsrl %ecx, %ecx +; SSE41-NEXT: cmovel %eax, %ecx +; SSE41-NEXT: xorl $7, %ecx +; SSE41-NEXT: pinsrb $3, %ecx, %xmm1 +; SSE41-NEXT: pextrb $4, %xmm0, %ecx +; SSE41-NEXT: bsrl %ecx, %ecx +; SSE41-NEXT: cmovel %eax, %ecx +; SSE41-NEXT: xorl $7, %ecx +; SSE41-NEXT: pinsrb $4, %ecx, %xmm1 +; SSE41-NEXT: pextrb $5, %xmm0, %ecx +; SSE41-NEXT: bsrl %ecx, %ecx +; SSE41-NEXT: cmovel %eax, %ecx +; SSE41-NEXT: xorl $7, %ecx +; SSE41-NEXT: pinsrb $5, %ecx, %xmm1 +; SSE41-NEXT: pextrb $6, %xmm0, %ecx +; SSE41-NEXT: bsrl %ecx, %ecx +; SSE41-NEXT: cmovel %eax, %ecx +; SSE41-NEXT: xorl $7, %ecx +; SSE41-NEXT: pinsrb $6, %ecx, %xmm1 +; SSE41-NEXT: pextrb $7, %xmm0, %ecx +; SSE41-NEXT: bsrl %ecx, %ecx +; SSE41-NEXT: cmovel %eax, %ecx +; SSE41-NEXT: xorl $7, %ecx +; SSE41-NEXT: pinsrb $7, %ecx, %xmm1 +; SSE41-NEXT: pextrb $8, %xmm0, %ecx +; SSE41-NEXT: bsrl %ecx, %ecx +; SSE41-NEXT: cmovel %eax, %ecx +; SSE41-NEXT: xorl $7, %ecx +; SSE41-NEXT: pinsrb $8, %ecx, %xmm1 +; SSE41-NEXT: pextrb $9, %xmm0, %ecx +; SSE41-NEXT: bsrl %ecx, %ecx +; SSE41-NEXT: cmovel %eax, %ecx +; SSE41-NEXT: xorl $7, %ecx +; SSE41-NEXT: pinsrb $9, %ecx, %xmm1 +; SSE41-NEXT: pextrb $10, %xmm0, %ecx +; 
SSE41-NEXT: bsrl %ecx, %ecx +; SSE41-NEXT: cmovel %eax, %ecx +; SSE41-NEXT: xorl $7, %ecx +; SSE41-NEXT: pinsrb $10, %ecx, %xmm1 +; SSE41-NEXT: pextrb $11, %xmm0, %ecx +; SSE41-NEXT: bsrl %ecx, %ecx +; SSE41-NEXT: cmovel %eax, %ecx +; SSE41-NEXT: xorl $7, %ecx +; SSE41-NEXT: pinsrb $11, %ecx, %xmm1 +; SSE41-NEXT: pextrb $12, %xmm0, %ecx +; SSE41-NEXT: bsrl %ecx, %ecx +; SSE41-NEXT: cmovel %eax, %ecx +; SSE41-NEXT: xorl $7, %ecx +; SSE41-NEXT: pinsrb $12, %ecx, %xmm1 +; SSE41-NEXT: pextrb $13, %xmm0, %ecx +; SSE41-NEXT: bsrl %ecx, %ecx +; SSE41-NEXT: cmovel %eax, %ecx +; SSE41-NEXT: xorl $7, %ecx +; SSE41-NEXT: pinsrb $13, %ecx, %xmm1 +; SSE41-NEXT: pextrb $14, %xmm0, %ecx +; SSE41-NEXT: bsrl %ecx, %ecx +; SSE41-NEXT: cmovel %eax, %ecx +; SSE41-NEXT: xorl $7, %ecx +; SSE41-NEXT: pinsrb $14, %ecx, %xmm1 +; SSE41-NEXT: pextrb $15, %xmm0, %ecx +; SSE41-NEXT: bsrl %ecx, %ecx +; SSE41-NEXT: cmovel %eax, %ecx +; SSE41-NEXT: xorl $7, %ecx +; SSE41-NEXT: pinsrb $15, %ecx, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: retq +; +; AVX-LABEL: testv16i8: +; AVX: # BB#0: +; AVX-NEXT: vpextrb $1, %xmm0, %eax +; AVX-NEXT: bsrl %eax, %ecx +; AVX-NEXT: movl $15, %eax +; AVX-NEXT: cmovel %eax, %ecx +; AVX-NEXT: xorl $7, %ecx +; AVX-NEXT: vpextrb $0, %xmm0, %edx +; AVX-NEXT: bsrl %edx, %edx +; AVX-NEXT: cmovel %eax, %edx +; AVX-NEXT: xorl $7, %edx +; AVX-NEXT: vmovd %edx, %xmm1 +; AVX-NEXT: vpinsrb $1, %ecx, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $2, %xmm0, %ecx +; AVX-NEXT: bsrl %ecx, %ecx +; AVX-NEXT: cmovel %eax, %ecx +; AVX-NEXT: xorl $7, %ecx +; AVX-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $3, %xmm0, %ecx +; AVX-NEXT: bsrl %ecx, %ecx +; AVX-NEXT: cmovel %eax, %ecx +; AVX-NEXT: xorl $7, %ecx +; AVX-NEXT: vpinsrb $3, %ecx, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $4, %xmm0, %ecx +; AVX-NEXT: bsrl %ecx, %ecx +; AVX-NEXT: cmovel %eax, %ecx +; AVX-NEXT: xorl $7, %ecx +; AVX-NEXT: vpinsrb $4, %ecx, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $5, %xmm0, %ecx +; AVX-NEXT: bsrl %ecx, %ecx +; AVX-NEXT: cmovel %eax, %ecx +; AVX-NEXT: xorl $7, %ecx +; AVX-NEXT: vpinsrb $5, %ecx, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $6, %xmm0, %ecx +; AVX-NEXT: bsrl %ecx, %ecx +; AVX-NEXT: cmovel %eax, %ecx +; AVX-NEXT: xorl $7, %ecx +; AVX-NEXT: vpinsrb $6, %ecx, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $7, %xmm0, %ecx +; AVX-NEXT: bsrl %ecx, %ecx +; AVX-NEXT: cmovel %eax, %ecx +; AVX-NEXT: xorl $7, %ecx +; AVX-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $8, %xmm0, %ecx +; AVX-NEXT: bsrl %ecx, %ecx +; AVX-NEXT: cmovel %eax, %ecx +; AVX-NEXT: xorl $7, %ecx +; AVX-NEXT: vpinsrb $8, %ecx, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $9, %xmm0, %ecx +; AVX-NEXT: bsrl %ecx, %ecx +; AVX-NEXT: cmovel %eax, %ecx +; AVX-NEXT: xorl $7, %ecx +; AVX-NEXT: vpinsrb $9, %ecx, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $10, %xmm0, %ecx +; AVX-NEXT: bsrl %ecx, %ecx +; AVX-NEXT: cmovel %eax, %ecx +; AVX-NEXT: xorl $7, %ecx +; AVX-NEXT: vpinsrb $10, %ecx, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $11, %xmm0, %ecx +; AVX-NEXT: bsrl %ecx, %ecx +; AVX-NEXT: cmovel %eax, %ecx +; AVX-NEXT: xorl $7, %ecx +; AVX-NEXT: vpinsrb $11, %ecx, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $12, %xmm0, %ecx +; AVX-NEXT: bsrl %ecx, %ecx +; AVX-NEXT: cmovel %eax, %ecx +; AVX-NEXT: xorl $7, %ecx +; AVX-NEXT: vpinsrb $12, %ecx, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $13, %xmm0, %ecx +; AVX-NEXT: bsrl %ecx, %ecx +; AVX-NEXT: cmovel %eax, %ecx +; AVX-NEXT: xorl $7, %ecx +; AVX-NEXT: vpinsrb $13, %ecx, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $14, %xmm0, %ecx +; AVX-NEXT: bsrl %ecx, %ecx +; AVX-NEXT: cmovel %eax, 
%ecx +; AVX-NEXT: xorl $7, %ecx +; AVX-NEXT: vpinsrb $14, %ecx, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $15, %xmm0, %ecx +; AVX-NEXT: bsrl %ecx, %ecx +; AVX-NEXT: cmovel %eax, %ecx +; AVX-NEXT: xorl $7, %ecx +; AVX-NEXT: vpinsrb $15, %ecx, %xmm1, %xmm0 +; AVX-NEXT: retq + %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %in, i1 0) + ret <16 x i8> %out +} + +define <16 x i8> @testv16i8u(<16 x i8> %in) { +; SSE2-LABEL: testv16i8u: +; SSE2: # BB#0: +; SSE2: pushq %rbx +; SSE2: movaps %xmm0, -16(%rsp) +; SSE2-NEXT: movzbl -1(%rsp), %eax +; SSE2-NEXT: bsrl %eax, %eax +; SSE2-NEXT: xorl $7, %eax +; SSE2-NEXT: movd %eax, %xmm0 +; SSE2-NEXT: movzbl -2(%rsp), %edi +; SSE2-NEXT: movzbl -3(%rsp), %edx +; SSE2-NEXT: movzbl -4(%rsp), %r9d +; SSE2-NEXT: movzbl -5(%rsp), %eax +; SSE2-NEXT: movzbl -6(%rsp), %r10d +; SSE2-NEXT: movzbl -7(%rsp), %ecx +; SSE2-NEXT: movzbl -8(%rsp), %r8d +; SSE2-NEXT: movzbl -9(%rsp), %esi +; SSE2-NEXT: bsrl %esi, %esi +; SSE2-NEXT: xorl $7, %esi +; SSE2-NEXT: movd %esi, %xmm1 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-NEXT: bsrl %eax, %eax +; SSE2-NEXT: xorl $7, %eax +; SSE2-NEXT: movd %eax, %xmm0 +; SSE2-NEXT: movzbl -10(%rsp), %eax +; SSE2-NEXT: movzbl -11(%rsp), %esi +; SSE2-NEXT: movzbl -12(%rsp), %r11d +; SSE2-NEXT: movzbl -13(%rsp), %ebx +; SSE2-NEXT: bsrl %ebx, %ebx +; SSE2-NEXT: xorl $7, %ebx +; SSE2-NEXT: movd %ebx, %xmm2 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE2-NEXT: bsrl %edx, %edx +; SSE2-NEXT: xorl $7, %edx +; SSE2-NEXT: movd %edx, %xmm0 +; SSE2-NEXT: bsrl %esi, %edx +; SSE2-NEXT: xorl $7, %edx +; SSE2-NEXT: movd %edx, %xmm3 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSE2-NEXT: bsrl %ecx, %ecx +; SSE2-NEXT: xorl $7, %ecx +; SSE2-NEXT: movd %ecx, %xmm0 +; SSE2-NEXT: movzbl -14(%rsp), %ecx +; SSE2-NEXT: movzbl -15(%rsp), %edx +; SSE2-NEXT: bsrl %edx, %edx +; SSE2-NEXT: xorl $7, %edx +; SSE2-NEXT: movd %edx, %xmm1 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] +; SSE2-NEXT: bsrl %edi, %edx +; SSE2-NEXT: xorl $7, %edx +; SSE2-NEXT: movd %edx, %xmm0 +; SSE2-NEXT: bsrl %eax, %eax +; SSE2-NEXT: xorl $7, %eax +; SSE2-NEXT: movd %eax, %xmm2 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-NEXT: bsrl %r10d, %eax +; SSE2-NEXT: xorl $7, %eax +; SSE2-NEXT: movd %eax, %xmm0 +; SSE2-NEXT: bsrl %ecx, %eax +; SSE2-NEXT: xorl $7, %eax +; SSE2-NEXT: movd %eax, %xmm3 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = 
xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] +; SSE2-NEXT: bsrl %r9d, %eax +; SSE2-NEXT: xorl $7, %eax +; SSE2-NEXT: movd %eax, %xmm0 +; SSE2-NEXT: bsrl %r11d, %eax +; SSE2-NEXT: xorl $7, %eax +; SSE2-NEXT: movd %eax, %xmm2 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-NEXT: bsrl %r8d, %eax +; SSE2-NEXT: xorl $7, %eax +; SSE2-NEXT: movd %eax, %xmm4 +; SSE2-NEXT: movzbl -16(%rsp), %eax +; SSE2-NEXT: bsrl %eax, %eax +; SSE2-NEXT: xorl $7, %eax +; SSE2-NEXT: movd %eax, %xmm0 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE2-NEXT: popq %rbx +; SSE2-NEXT: retq +; +; SSE3-LABEL: testv16i8u: +; SSE3: # BB#0: +; SSE3: pushq %rbx +; SSE3: movaps %xmm0, -16(%rsp) +; SSE3-NEXT: movzbl -1(%rsp), %eax +; SSE3-NEXT: bsrl %eax, %eax +; SSE3-NEXT: xorl $7, %eax +; SSE3-NEXT: movd %eax, %xmm0 +; SSE3-NEXT: movzbl -2(%rsp), %edi +; SSE3-NEXT: movzbl -3(%rsp), %edx +; SSE3-NEXT: movzbl -4(%rsp), %r9d +; SSE3-NEXT: movzbl -5(%rsp), %eax +; SSE3-NEXT: movzbl -6(%rsp), %r10d +; SSE3-NEXT: movzbl -7(%rsp), %ecx +; SSE3-NEXT: movzbl -8(%rsp), %r8d +; SSE3-NEXT: movzbl -9(%rsp), %esi +; SSE3-NEXT: bsrl %esi, %esi +; SSE3-NEXT: xorl $7, %esi +; SSE3-NEXT: movd %esi, %xmm1 +; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE3-NEXT: bsrl %eax, %eax +; SSE3-NEXT: xorl $7, %eax +; SSE3-NEXT: movd %eax, %xmm0 +; SSE3-NEXT: movzbl -10(%rsp), %eax +; SSE3-NEXT: movzbl -11(%rsp), %esi +; SSE3-NEXT: movzbl -12(%rsp), %r11d +; SSE3-NEXT: movzbl -13(%rsp), %ebx +; SSE3-NEXT: bsrl %ebx, %ebx +; SSE3-NEXT: xorl $7, %ebx +; SSE3-NEXT: movd %ebx, %xmm2 +; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE3-NEXT: bsrl %edx, %edx +; SSE3-NEXT: xorl $7, %edx +; SSE3-NEXT: movd %edx, %xmm0 +; SSE3-NEXT: bsrl %esi, %edx +; SSE3-NEXT: xorl $7, %edx +; SSE3-NEXT: movd %edx, %xmm3 +; SSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSE3-NEXT: bsrl %ecx, %ecx +; SSE3-NEXT: xorl $7, %ecx +; SSE3-NEXT: movd %ecx, %xmm0 +; SSE3-NEXT: movzbl -14(%rsp), %ecx +; SSE3-NEXT: movzbl -15(%rsp), %edx +; SSE3-NEXT: bsrl %edx, %edx +; 
SSE3-NEXT: xorl $7, %edx +; SSE3-NEXT: movd %edx, %xmm1 +; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] +; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] +; SSE3-NEXT: bsrl %edi, %edx +; SSE3-NEXT: xorl $7, %edx +; SSE3-NEXT: movd %edx, %xmm0 +; SSE3-NEXT: bsrl %eax, %eax +; SSE3-NEXT: xorl $7, %eax +; SSE3-NEXT: movd %eax, %xmm2 +; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE3-NEXT: bsrl %r10d, %eax +; SSE3-NEXT: xorl $7, %eax +; SSE3-NEXT: movd %eax, %xmm0 +; SSE3-NEXT: bsrl %ecx, %eax +; SSE3-NEXT: xorl $7, %eax +; SSE3-NEXT: movd %eax, %xmm3 +; SSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] +; SSE3-NEXT: bsrl %r9d, %eax +; SSE3-NEXT: xorl $7, %eax +; SSE3-NEXT: movd %eax, %xmm0 +; SSE3-NEXT: bsrl %r11d, %eax +; SSE3-NEXT: xorl $7, %eax +; SSE3-NEXT: movd %eax, %xmm2 +; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE3-NEXT: bsrl %r8d, %eax +; SSE3-NEXT: xorl $7, %eax +; SSE3-NEXT: movd %eax, %xmm4 +; SSE3-NEXT: movzbl -16(%rsp), %eax +; SSE3-NEXT: bsrl %eax, %eax +; SSE3-NEXT: xorl $7, %eax +; SSE3-NEXT: movd %eax, %xmm0 +; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] +; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] +; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE3-NEXT: popq %rbx +; SSE3-NEXT: retq +; +; SSSE3-LABEL: testv16i8u: +; SSSE3: # BB#0: +; SSSE3: pushq %rbx +; SSSE3: movaps %xmm0, -16(%rsp) +; SSSE3-NEXT: movzbl -1(%rsp), %eax +; SSSE3-NEXT: bsrl %eax, %eax +; SSSE3-NEXT: xorl $7, %eax +; SSSE3-NEXT: movd %eax, %xmm0 +; SSSE3-NEXT: movzbl -2(%rsp), %edi +; SSSE3-NEXT: movzbl -3(%rsp), %edx +; SSSE3-NEXT: movzbl -4(%rsp), %r9d +; SSSE3-NEXT: movzbl -5(%rsp), %eax +; SSSE3-NEXT: movzbl -6(%rsp), %r10d +; SSSE3-NEXT: movzbl -7(%rsp), %ecx +; SSSE3-NEXT: movzbl -8(%rsp), %r8d +; SSSE3-NEXT: movzbl -9(%rsp), %esi +; SSSE3-NEXT: bsrl %esi, %esi +; SSSE3-NEXT: xorl $7, %esi +; SSSE3-NEXT: movd %esi, %xmm1 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSSE3-NEXT: bsrl %eax, %eax +; SSSE3-NEXT: xorl $7, %eax +; 
SSSE3-NEXT: movd %eax, %xmm0 +; SSSE3-NEXT: movzbl -10(%rsp), %eax +; SSSE3-NEXT: movzbl -11(%rsp), %esi +; SSSE3-NEXT: movzbl -12(%rsp), %r11d +; SSSE3-NEXT: movzbl -13(%rsp), %ebx +; SSSE3-NEXT: bsrl %ebx, %ebx +; SSSE3-NEXT: xorl $7, %ebx +; SSSE3-NEXT: movd %ebx, %xmm2 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSSE3-NEXT: bsrl %edx, %edx +; SSSE3-NEXT: xorl $7, %edx +; SSSE3-NEXT: movd %edx, %xmm0 +; SSSE3-NEXT: bsrl %esi, %edx +; SSSE3-NEXT: xorl $7, %edx +; SSSE3-NEXT: movd %edx, %xmm3 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSSE3-NEXT: bsrl %ecx, %ecx +; SSSE3-NEXT: xorl $7, %ecx +; SSSE3-NEXT: movd %ecx, %xmm0 +; SSSE3-NEXT: movzbl -14(%rsp), %ecx +; SSSE3-NEXT: movzbl -15(%rsp), %edx +; SSSE3-NEXT: bsrl %edx, %edx +; SSSE3-NEXT: xorl $7, %edx +; SSSE3-NEXT: movd %edx, %xmm1 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] +; SSSE3-NEXT: bsrl %edi, %edx +; SSSE3-NEXT: xorl $7, %edx +; SSSE3-NEXT: movd %edx, %xmm0 +; SSSE3-NEXT: bsrl %eax, %eax +; SSSE3-NEXT: xorl $7, %eax +; SSSE3-NEXT: movd %eax, %xmm2 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSSE3-NEXT: bsrl %r10d, %eax +; SSSE3-NEXT: xorl $7, %eax +; SSSE3-NEXT: movd %eax, %xmm0 +; SSSE3-NEXT: bsrl %ecx, %eax +; SSSE3-NEXT: xorl $7, %eax +; SSSE3-NEXT: movd %eax, %xmm3 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] +; SSSE3-NEXT: bsrl %r9d, %eax +; SSSE3-NEXT: xorl $7, %eax +; SSSE3-NEXT: movd %eax, %xmm0 +; SSSE3-NEXT: bsrl %r11d, %eax +; SSSE3-NEXT: xorl $7, %eax +; SSSE3-NEXT: movd %eax, %xmm2 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSSE3-NEXT: bsrl %r8d, %eax +; SSSE3-NEXT: xorl $7, %eax +; SSSE3-NEXT: movd %eax, %xmm4 +; SSSE3-NEXT: movzbl -16(%rsp), %eax +; SSSE3-NEXT: bsrl %eax, %eax +; SSSE3-NEXT: xorl $7, %eax +; SSSE3-NEXT: movd %eax, %xmm0 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSSE3-NEXT: punpcklbw 
{{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSSE3-NEXT: popq %rbx +; SSSE3-NEXT: retq +; +; SSE41-LABEL: testv16i8u: +; SSE41: # BB#0: +; SSE41-NEXT: pextrb $1, %xmm0, %eax +; SSE41-NEXT: bsrl %eax, %eax +; SSE41-NEXT: xorl $7, %eax +; SSE41-NEXT: pextrb $0, %xmm0, %ecx +; SSE41-NEXT: bsrl %ecx, %ecx +; SSE41-NEXT: xorl $7, %ecx +; SSE41-NEXT: movd %ecx, %xmm1 +; SSE41-NEXT: pinsrb $1, %eax, %xmm1 +; SSE41-NEXT: pextrb $2, %xmm0, %eax +; SSE41-NEXT: bsrl %eax, %eax +; SSE41-NEXT: xorl $7, %eax +; SSE41-NEXT: pinsrb $2, %eax, %xmm1 +; SSE41-NEXT: pextrb $3, %xmm0, %eax +; SSE41-NEXT: bsrl %eax, %eax +; SSE41-NEXT: xorl $7, %eax +; SSE41-NEXT: pinsrb $3, %eax, %xmm1 +; SSE41-NEXT: pextrb $4, %xmm0, %eax +; SSE41-NEXT: bsrl %eax, %eax +; SSE41-NEXT: xorl $7, %eax +; SSE41-NEXT: pinsrb $4, %eax, %xmm1 +; SSE41-NEXT: pextrb $5, %xmm0, %eax +; SSE41-NEXT: bsrl %eax, %eax +; SSE41-NEXT: xorl $7, %eax +; SSE41-NEXT: pinsrb $5, %eax, %xmm1 +; SSE41-NEXT: pextrb $6, %xmm0, %eax +; SSE41-NEXT: bsrl %eax, %eax +; SSE41-NEXT: xorl $7, %eax +; SSE41-NEXT: pinsrb $6, %eax, %xmm1 +; SSE41-NEXT: pextrb $7, %xmm0, %eax +; SSE41-NEXT: bsrl %eax, %eax +; SSE41-NEXT: xorl $7, %eax +; SSE41-NEXT: pinsrb $7, %eax, %xmm1 +; SSE41-NEXT: pextrb $8, %xmm0, %eax +; SSE41-NEXT: bsrl %eax, %eax +; SSE41-NEXT: xorl $7, %eax +; SSE41-NEXT: pinsrb $8, %eax, %xmm1 +; SSE41-NEXT: pextrb $9, %xmm0, %eax +; SSE41-NEXT: bsrl %eax, %eax +; SSE41-NEXT: xorl $7, %eax +; SSE41-NEXT: pinsrb $9, %eax, %xmm1 +; SSE41-NEXT: pextrb $10, %xmm0, %eax +; SSE41-NEXT: bsrl %eax, %eax +; SSE41-NEXT: xorl $7, %eax +; SSE41-NEXT: pinsrb $10, %eax, %xmm1 +; SSE41-NEXT: pextrb $11, %xmm0, %eax +; SSE41-NEXT: bsrl %eax, %eax +; SSE41-NEXT: xorl $7, %eax +; SSE41-NEXT: pinsrb $11, %eax, %xmm1 +; SSE41-NEXT: pextrb $12, %xmm0, %eax +; SSE41-NEXT: bsrl %eax, %eax +; SSE41-NEXT: xorl $7, %eax +; SSE41-NEXT: pinsrb $12, %eax, %xmm1 +; SSE41-NEXT: pextrb $13, %xmm0, %eax +; SSE41-NEXT: bsrl %eax, %eax +; SSE41-NEXT: xorl $7, %eax +; SSE41-NEXT: pinsrb $13, %eax, %xmm1 +; SSE41-NEXT: pextrb $14, %xmm0, %eax +; SSE41-NEXT: bsrl %eax, %eax +; SSE41-NEXT: xorl $7, %eax +; SSE41-NEXT: pinsrb $14, %eax, %xmm1 +; SSE41-NEXT: pextrb $15, %xmm0, %eax +; SSE41-NEXT: bsrl %eax, %eax +; SSE41-NEXT: xorl $7, %eax +; SSE41-NEXT: pinsrb $15, %eax, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: retq +; +; AVX-LABEL: testv16i8u: +; AVX: # BB#0: +; AVX-NEXT: vpextrb $1, %xmm0, %eax +; AVX-NEXT: bsrl %eax, %eax +; AVX-NEXT: xorl $7, %eax +; AVX-NEXT: vpextrb $0, %xmm0, %ecx +; AVX-NEXT: bsrl %ecx, %ecx +; AVX-NEXT: xorl $7, %ecx +; AVX-NEXT: vmovd %ecx, %xmm1 +; AVX-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $2, %xmm0, %eax +; AVX-NEXT: bsrl %eax, %eax +; AVX-NEXT: xorl $7, %eax +; AVX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $3, %xmm0, %eax +; AVX-NEXT: bsrl %eax, %eax +; AVX-NEXT: xorl $7, %eax +; AVX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $4, %xmm0, %eax +; AVX-NEXT: bsrl %eax, %eax +; AVX-NEXT: xorl $7, %eax +; AVX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $5, %xmm0, %eax +; AVX-NEXT: bsrl %eax, %eax +; AVX-NEXT: xorl $7, %eax +; AVX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $6, %xmm0, %eax +; AVX-NEXT: bsrl 
%eax, %eax +; AVX-NEXT: xorl $7, %eax +; AVX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $7, %xmm0, %eax +; AVX-NEXT: bsrl %eax, %eax +; AVX-NEXT: xorl $7, %eax +; AVX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $8, %xmm0, %eax +; AVX-NEXT: bsrl %eax, %eax +; AVX-NEXT: xorl $7, %eax +; AVX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $9, %xmm0, %eax +; AVX-NEXT: bsrl %eax, %eax +; AVX-NEXT: xorl $7, %eax +; AVX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $10, %xmm0, %eax +; AVX-NEXT: bsrl %eax, %eax +; AVX-NEXT: xorl $7, %eax +; AVX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $11, %xmm0, %eax +; AVX-NEXT: bsrl %eax, %eax +; AVX-NEXT: xorl $7, %eax +; AVX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $12, %xmm0, %eax +; AVX-NEXT: bsrl %eax, %eax +; AVX-NEXT: xorl $7, %eax +; AVX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $13, %xmm0, %eax +; AVX-NEXT: bsrl %eax, %eax +; AVX-NEXT: xorl $7, %eax +; AVX-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $14, %xmm0, %eax +; AVX-NEXT: bsrl %eax, %eax +; AVX-NEXT: xorl $7, %eax +; AVX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpextrb $15, %xmm0, %eax +; AVX-NEXT: bsrl %eax, %eax +; AVX-NEXT: xorl $7, %eax +; AVX-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0 +; AVX-NEXT: retq + %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %in, i1 -1) + ret <16 x i8> %out +} + define <2 x i64> @foldv2i64() { ; SSE-LABEL: foldv2i64: ; SSE: # BB#0: diff --git a/llvm/test/CodeGen/X86/vector-lzcnt-256.ll b/llvm/test/CodeGen/X86/vector-lzcnt-256.ll index b716289..48abe12 100644 --- a/llvm/test/CodeGen/X86/vector-lzcnt-256.ll +++ b/llvm/test/CodeGen/X86/vector-lzcnt-256.ll @@ -3,6 +3,1230 @@ target triple = "x86_64-unknown-unknown" +define <4 x i64> @testv4i64(<4 x i64> %in) { +; AVX1-LABEL: testv4i64: +; AVX1: # BB#0: +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vpextrq $1, %xmm1, %rax +; AVX1-NEXT: bsrq %rax, %rax +; AVX1-NEXT: movl $127, %ecx +; AVX1-NEXT: cmoveq %rcx, %rax +; AVX1-NEXT: xorq $63, %rax +; AVX1-NEXT: vmovq %rax, %xmm2 +; AVX1-NEXT: vmovq %xmm1, %rax +; AVX1-NEXT: bsrq %rax, %rax +; AVX1-NEXT: cmoveq %rcx, %rax +; AVX1-NEXT: xorq $63, %rax +; AVX1-NEXT: vmovq %rax, %xmm1 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: bsrq %rax, %rax +; AVX1-NEXT: cmoveq %rcx, %rax +; AVX1-NEXT: xorq $63, %rax +; AVX1-NEXT: vmovq %rax, %xmm2 +; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: bsrq %rax, %rax +; AVX1-NEXT: cmoveq %rcx, %rax +; AVX1-NEXT: xorq $63, %rax +; AVX1-NEXT: vmovq %rax, %xmm0 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: testv4i64: +; AVX2: # BB#0: +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpextrq $1, %xmm1, %rax +; AVX2-NEXT: bsrq %rax, %rax +; AVX2-NEXT: movl $127, %ecx +; AVX2-NEXT: cmoveq %rcx, %rax +; AVX2-NEXT: xorq $63, %rax +; AVX2-NEXT: vmovq %rax, %xmm2 +; AVX2-NEXT: vmovq %xmm1, %rax +; AVX2-NEXT: bsrq %rax, %rax +; AVX2-NEXT: cmoveq %rcx, %rax +; AVX2-NEXT: xorq $63, %rax +; AVX2-NEXT: vmovq %rax, %xmm1 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX2-NEXT: vpextrq $1, %xmm0, %rax +; AVX2-NEXT: bsrq %rax, %rax +; AVX2-NEXT: cmoveq %rcx, %rax +; AVX2-NEXT: xorq $63, %rax +; AVX2-NEXT: vmovq %rax, %xmm2 +; AVX2-NEXT: vmovq %xmm0, %rax +; AVX2-NEXT: bsrq %rax, %rax +; AVX2-NEXT: cmoveq %rcx, %rax +; AVX2-NEXT: xorq $63, %rax +; 
AVX2-NEXT: vmovq %rax, %xmm0 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: retq + %out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %in, i1 0) + ret <4 x i64> %out +} + +define <4 x i64> @testv4i64u(<4 x i64> %in) { +; AVX1-LABEL: testv4i64u: +; AVX1: # BB#0: +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vpextrq $1, %xmm1, %rax +; AVX1-NEXT: bsrq %rax, %rax +; AVX1-NEXT: xorq $63, %rax +; AVX1-NEXT: vmovq %rax, %xmm2 +; AVX1-NEXT: vmovq %xmm1, %rax +; AVX1-NEXT: bsrq %rax, %rax +; AVX1-NEXT: xorq $63, %rax +; AVX1-NEXT: vmovq %rax, %xmm1 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: bsrq %rax, %rax +; AVX1-NEXT: xorq $63, %rax +; AVX1-NEXT: vmovq %rax, %xmm2 +; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: bsrq %rax, %rax +; AVX1-NEXT: xorq $63, %rax +; AVX1-NEXT: vmovq %rax, %xmm0 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: testv4i64u: +; AVX2: # BB#0: +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpextrq $1, %xmm1, %rax +; AVX2-NEXT: bsrq %rax, %rax +; AVX2-NEXT: xorq $63, %rax +; AVX2-NEXT: vmovq %rax, %xmm2 +; AVX2-NEXT: vmovq %xmm1, %rax +; AVX2-NEXT: bsrq %rax, %rax +; AVX2-NEXT: xorq $63, %rax +; AVX2-NEXT: vmovq %rax, %xmm1 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX2-NEXT: vpextrq $1, %xmm0, %rax +; AVX2-NEXT: bsrq %rax, %rax +; AVX2-NEXT: xorq $63, %rax +; AVX2-NEXT: vmovq %rax, %xmm2 +; AVX2-NEXT: vmovq %xmm0, %rax +; AVX2-NEXT: bsrq %rax, %rax +; AVX2-NEXT: xorq $63, %rax +; AVX2-NEXT: vmovq %rax, %xmm0 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: retq + %out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %in, i1 -1) + ret <4 x i64> %out +} + +define <8 x i32> @testv8i32(<8 x i32> %in) { +; AVX1-LABEL: testv8i32: +; AVX1: # BB#0: +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vpextrd $1, %xmm1, %eax +; AVX1-NEXT: bsrl %eax, %ecx +; AVX1-NEXT: movl $63, %eax +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $31, %ecx +; AVX1-NEXT: vmovd %xmm1, %edx +; AVX1-NEXT: bsrl %edx, %edx +; AVX1-NEXT: cmovel %eax, %edx +; AVX1-NEXT: xorl $31, %edx +; AVX1-NEXT: vmovd %edx, %xmm2 +; AVX1-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrd $2, %xmm1, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $31, %ecx +; AVX1-NEXT: vpinsrd $2, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrd $3, %xmm1, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $31, %ecx +; AVX1-NEXT: vpinsrd $3, %ecx, %xmm2, %xmm1 +; AVX1-NEXT: vpextrd $1, %xmm0, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $31, %ecx +; AVX1-NEXT: vmovd %xmm0, %edx +; AVX1-NEXT: bsrl %edx, %edx +; AVX1-NEXT: cmovel %eax, %edx +; AVX1-NEXT: xorl $31, %edx +; AVX1-NEXT: vmovd %edx, %xmm2 +; AVX1-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrd $2, %xmm0, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $31, %ecx +; AVX1-NEXT: vpinsrd $2, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrd $3, %xmm0, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $31, %ecx +; AVX1-NEXT: vpinsrd $3, %ecx, %xmm2, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: testv8i32: +; 
AVX2: # BB#0: +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpextrd $1, %xmm1, %eax +; AVX2-NEXT: bsrl %eax, %ecx +; AVX2-NEXT: movl $63, %eax +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $31, %ecx +; AVX2-NEXT: vmovd %xmm1, %edx +; AVX2-NEXT: bsrl %edx, %edx +; AVX2-NEXT: cmovel %eax, %edx +; AVX2-NEXT: xorl $31, %edx +; AVX2-NEXT: vmovd %edx, %xmm2 +; AVX2-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrd $2, %xmm1, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $31, %ecx +; AVX2-NEXT: vpinsrd $2, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrd $3, %xmm1, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $31, %ecx +; AVX2-NEXT: vpinsrd $3, %ecx, %xmm2, %xmm1 +; AVX2-NEXT: vpextrd $1, %xmm0, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $31, %ecx +; AVX2-NEXT: vmovd %xmm0, %edx +; AVX2-NEXT: bsrl %edx, %edx +; AVX2-NEXT: cmovel %eax, %edx +; AVX2-NEXT: xorl $31, %edx +; AVX2-NEXT: vmovd %edx, %xmm2 +; AVX2-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrd $2, %xmm0, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $31, %ecx +; AVX2-NEXT: vpinsrd $2, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrd $3, %xmm0, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $31, %ecx +; AVX2-NEXT: vpinsrd $3, %ecx, %xmm2, %xmm0 +; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: retq + %out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %in, i1 0) + ret <8 x i32> %out +} + +define <8 x i32> @testv8i32u(<8 x i32> %in) { +; AVX1-LABEL: testv8i32u: +; AVX1: # BB#0: +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vpextrd $1, %xmm1, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $31, %eax +; AVX1-NEXT: vmovd %xmm1, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: xorl $31, %ecx +; AVX1-NEXT: vmovd %ecx, %xmm2 +; AVX1-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrd $2, %xmm1, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $31, %eax +; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrd $3, %xmm1, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $31, %eax +; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm1 +; AVX1-NEXT: vpextrd $1, %xmm0, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $31, %eax +; AVX1-NEXT: vmovd %xmm0, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: xorl $31, %ecx +; AVX1-NEXT: vmovd %ecx, %xmm2 +; AVX1-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrd $2, %xmm0, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $31, %eax +; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrd $3, %xmm0, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $31, %eax +; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: testv8i32u: +; AVX2: # BB#0: +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpextrd $1, %xmm1, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $31, %eax +; AVX2-NEXT: vmovd %xmm1, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: xorl $31, %ecx +; AVX2-NEXT: vmovd %ecx, %xmm2 +; AVX2-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrd $2, %xmm1, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $31, %eax +; AVX2-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrd $3, %xmm1, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $31, %eax +; AVX2-NEXT: vpinsrd $3, %eax, %xmm2, %xmm1 +; AVX2-NEXT: 
vpextrd $1, %xmm0, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $31, %eax +; AVX2-NEXT: vmovd %xmm0, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: xorl $31, %ecx +; AVX2-NEXT: vmovd %ecx, %xmm2 +; AVX2-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrd $2, %xmm0, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $31, %eax +; AVX2-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrd $3, %xmm0, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $31, %eax +; AVX2-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0 +; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: retq + %out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %in, i1 -1) + ret <8 x i32> %out +} + +define <16 x i16> @testv16i16(<16 x i16> %in) { +; AVX1-LABEL: testv16i16: +; AVX1: # BB#0: +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vpextrw $1, %xmm1, %eax +; AVX1-NEXT: bsrw %ax, %cx +; AVX1-NEXT: movw $31, %ax +; AVX1-NEXT: cmovew %ax, %cx +; AVX1-NEXT: xorl $15, %ecx +; AVX1-NEXT: vmovd %xmm1, %edx +; AVX1-NEXT: bsrw %dx, %dx +; AVX1-NEXT: cmovew %ax, %dx +; AVX1-NEXT: xorl $15, %edx +; AVX1-NEXT: vmovd %edx, %xmm2 +; AVX1-NEXT: vpinsrw $1, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $2, %xmm1, %ecx +; AVX1-NEXT: bsrw %cx, %cx +; AVX1-NEXT: cmovew %ax, %cx +; AVX1-NEXT: xorl $15, %ecx +; AVX1-NEXT: vpinsrw $2, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $3, %xmm1, %ecx +; AVX1-NEXT: bsrw %cx, %cx +; AVX1-NEXT: cmovew %ax, %cx +; AVX1-NEXT: xorl $15, %ecx +; AVX1-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $4, %xmm1, %ecx +; AVX1-NEXT: bsrw %cx, %cx +; AVX1-NEXT: cmovew %ax, %cx +; AVX1-NEXT: xorl $15, %ecx +; AVX1-NEXT: vpinsrw $4, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $5, %xmm1, %ecx +; AVX1-NEXT: bsrw %cx, %cx +; AVX1-NEXT: cmovew %ax, %cx +; AVX1-NEXT: xorl $15, %ecx +; AVX1-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $6, %xmm1, %ecx +; AVX1-NEXT: bsrw %cx, %cx +; AVX1-NEXT: cmovew %ax, %cx +; AVX1-NEXT: xorl $15, %ecx +; AVX1-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $7, %xmm1, %ecx +; AVX1-NEXT: bsrw %cx, %cx +; AVX1-NEXT: cmovew %ax, %cx +; AVX1-NEXT: xorl $15, %ecx +; AVX1-NEXT: vpinsrw $7, %ecx, %xmm2, %xmm1 +; AVX1-NEXT: vpextrw $1, %xmm0, %ecx +; AVX1-NEXT: bsrw %cx, %cx +; AVX1-NEXT: cmovew %ax, %cx +; AVX1-NEXT: xorl $15, %ecx +; AVX1-NEXT: vmovd %xmm0, %edx +; AVX1-NEXT: bsrw %dx, %dx +; AVX1-NEXT: cmovew %ax, %dx +; AVX1-NEXT: xorl $15, %edx +; AVX1-NEXT: vmovd %edx, %xmm2 +; AVX1-NEXT: vpinsrw $1, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $2, %xmm0, %ecx +; AVX1-NEXT: bsrw %cx, %cx +; AVX1-NEXT: cmovew %ax, %cx +; AVX1-NEXT: xorl $15, %ecx +; AVX1-NEXT: vpinsrw $2, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $3, %xmm0, %ecx +; AVX1-NEXT: bsrw %cx, %cx +; AVX1-NEXT: cmovew %ax, %cx +; AVX1-NEXT: xorl $15, %ecx +; AVX1-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $4, %xmm0, %ecx +; AVX1-NEXT: bsrw %cx, %cx +; AVX1-NEXT: cmovew %ax, %cx +; AVX1-NEXT: xorl $15, %ecx +; AVX1-NEXT: vpinsrw $4, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $5, %xmm0, %ecx +; AVX1-NEXT: bsrw %cx, %cx +; AVX1-NEXT: cmovew %ax, %cx +; AVX1-NEXT: xorl $15, %ecx +; AVX1-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $6, %xmm0, %ecx +; AVX1-NEXT: bsrw %cx, %cx +; AVX1-NEXT: cmovew %ax, %cx +; AVX1-NEXT: xorl $15, %ecx +; AVX1-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $7, %xmm0, %ecx +; AVX1-NEXT: bsrw %cx, %cx +; AVX1-NEXT: cmovew %ax, %cx +; AVX1-NEXT: xorl $15, %ecx +; AVX1-NEXT: vpinsrw $7, %ecx, %xmm2, %xmm0 +; AVX1-NEXT: 
vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: testv16i16: +; AVX2: # BB#0: +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpextrw $1, %xmm1, %eax +; AVX2-NEXT: bsrw %ax, %cx +; AVX2-NEXT: movw $31, %ax +; AVX2-NEXT: cmovew %ax, %cx +; AVX2-NEXT: xorl $15, %ecx +; AVX2-NEXT: vmovd %xmm1, %edx +; AVX2-NEXT: bsrw %dx, %dx +; AVX2-NEXT: cmovew %ax, %dx +; AVX2-NEXT: xorl $15, %edx +; AVX2-NEXT: vmovd %edx, %xmm2 +; AVX2-NEXT: vpinsrw $1, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $2, %xmm1, %ecx +; AVX2-NEXT: bsrw %cx, %cx +; AVX2-NEXT: cmovew %ax, %cx +; AVX2-NEXT: xorl $15, %ecx +; AVX2-NEXT: vpinsrw $2, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $3, %xmm1, %ecx +; AVX2-NEXT: bsrw %cx, %cx +; AVX2-NEXT: cmovew %ax, %cx +; AVX2-NEXT: xorl $15, %ecx +; AVX2-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $4, %xmm1, %ecx +; AVX2-NEXT: bsrw %cx, %cx +; AVX2-NEXT: cmovew %ax, %cx +; AVX2-NEXT: xorl $15, %ecx +; AVX2-NEXT: vpinsrw $4, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $5, %xmm1, %ecx +; AVX2-NEXT: bsrw %cx, %cx +; AVX2-NEXT: cmovew %ax, %cx +; AVX2-NEXT: xorl $15, %ecx +; AVX2-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $6, %xmm1, %ecx +; AVX2-NEXT: bsrw %cx, %cx +; AVX2-NEXT: cmovew %ax, %cx +; AVX2-NEXT: xorl $15, %ecx +; AVX2-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $7, %xmm1, %ecx +; AVX2-NEXT: bsrw %cx, %cx +; AVX2-NEXT: cmovew %ax, %cx +; AVX2-NEXT: xorl $15, %ecx +; AVX2-NEXT: vpinsrw $7, %ecx, %xmm2, %xmm1 +; AVX2-NEXT: vpextrw $1, %xmm0, %ecx +; AVX2-NEXT: bsrw %cx, %cx +; AVX2-NEXT: cmovew %ax, %cx +; AVX2-NEXT: xorl $15, %ecx +; AVX2-NEXT: vmovd %xmm0, %edx +; AVX2-NEXT: bsrw %dx, %dx +; AVX2-NEXT: cmovew %ax, %dx +; AVX2-NEXT: xorl $15, %edx +; AVX2-NEXT: vmovd %edx, %xmm2 +; AVX2-NEXT: vpinsrw $1, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $2, %xmm0, %ecx +; AVX2-NEXT: bsrw %cx, %cx +; AVX2-NEXT: cmovew %ax, %cx +; AVX2-NEXT: xorl $15, %ecx +; AVX2-NEXT: vpinsrw $2, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $3, %xmm0, %ecx +; AVX2-NEXT: bsrw %cx, %cx +; AVX2-NEXT: cmovew %ax, %cx +; AVX2-NEXT: xorl $15, %ecx +; AVX2-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $4, %xmm0, %ecx +; AVX2-NEXT: bsrw %cx, %cx +; AVX2-NEXT: cmovew %ax, %cx +; AVX2-NEXT: xorl $15, %ecx +; AVX2-NEXT: vpinsrw $4, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $5, %xmm0, %ecx +; AVX2-NEXT: bsrw %cx, %cx +; AVX2-NEXT: cmovew %ax, %cx +; AVX2-NEXT: xorl $15, %ecx +; AVX2-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $6, %xmm0, %ecx +; AVX2-NEXT: bsrw %cx, %cx +; AVX2-NEXT: cmovew %ax, %cx +; AVX2-NEXT: xorl $15, %ecx +; AVX2-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $7, %xmm0, %ecx +; AVX2-NEXT: bsrw %cx, %cx +; AVX2-NEXT: cmovew %ax, %cx +; AVX2-NEXT: xorl $15, %ecx +; AVX2-NEXT: vpinsrw $7, %ecx, %xmm2, %xmm0 +; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: retq + %out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %in, i1 0) + ret <16 x i16> %out +} + +define <16 x i16> @testv16i16u(<16 x i16> %in) { +; AVX1-LABEL: testv16i16u: +; AVX1: # BB#0: +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vpextrw $1, %xmm1, %eax +; AVX1-NEXT: bsrw %ax, %ax +; AVX1-NEXT: xorl $15, %eax +; AVX1-NEXT: vmovd %xmm1, %ecx +; AVX1-NEXT: bsrw %cx, %cx +; AVX1-NEXT: xorl $15, %ecx +; AVX1-NEXT: vmovd %ecx, %xmm2 +; AVX1-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $2, %xmm1, %eax +; AVX1-NEXT: bsrw %ax, %ax +; AVX1-NEXT: xorl $15, %eax +; AVX1-NEXT: vpinsrw $2, %eax, %xmm2, 
%xmm2 +; AVX1-NEXT: vpextrw $3, %xmm1, %eax +; AVX1-NEXT: bsrw %ax, %ax +; AVX1-NEXT: xorl $15, %eax +; AVX1-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $4, %xmm1, %eax +; AVX1-NEXT: bsrw %ax, %ax +; AVX1-NEXT: xorl $15, %eax +; AVX1-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $5, %xmm1, %eax +; AVX1-NEXT: bsrw %ax, %ax +; AVX1-NEXT: xorl $15, %eax +; AVX1-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $6, %xmm1, %eax +; AVX1-NEXT: bsrw %ax, %ax +; AVX1-NEXT: xorl $15, %eax +; AVX1-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $7, %xmm1, %eax +; AVX1-NEXT: bsrw %ax, %ax +; AVX1-NEXT: xorl $15, %eax +; AVX1-NEXT: vpinsrw $7, %eax, %xmm2, %xmm1 +; AVX1-NEXT: vpextrw $1, %xmm0, %eax +; AVX1-NEXT: bsrw %ax, %ax +; AVX1-NEXT: xorl $15, %eax +; AVX1-NEXT: vmovd %xmm0, %ecx +; AVX1-NEXT: bsrw %cx, %cx +; AVX1-NEXT: xorl $15, %ecx +; AVX1-NEXT: vmovd %ecx, %xmm2 +; AVX1-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $2, %xmm0, %eax +; AVX1-NEXT: bsrw %ax, %ax +; AVX1-NEXT: xorl $15, %eax +; AVX1-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $3, %xmm0, %eax +; AVX1-NEXT: bsrw %ax, %ax +; AVX1-NEXT: xorl $15, %eax +; AVX1-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $4, %xmm0, %eax +; AVX1-NEXT: bsrw %ax, %ax +; AVX1-NEXT: xorl $15, %eax +; AVX1-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $5, %xmm0, %eax +; AVX1-NEXT: bsrw %ax, %ax +; AVX1-NEXT: xorl $15, %eax +; AVX1-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $6, %xmm0, %eax +; AVX1-NEXT: bsrw %ax, %ax +; AVX1-NEXT: xorl $15, %eax +; AVX1-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrw $7, %xmm0, %eax +; AVX1-NEXT: bsrw %ax, %ax +; AVX1-NEXT: xorl $15, %eax +; AVX1-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: testv16i16u: +; AVX2: # BB#0: +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpextrw $1, %xmm1, %eax +; AVX2-NEXT: bsrw %ax, %ax +; AVX2-NEXT: xorl $15, %eax +; AVX2-NEXT: vmovd %xmm1, %ecx +; AVX2-NEXT: bsrw %cx, %cx +; AVX2-NEXT: xorl $15, %ecx +; AVX2-NEXT: vmovd %ecx, %xmm2 +; AVX2-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $2, %xmm1, %eax +; AVX2-NEXT: bsrw %ax, %ax +; AVX2-NEXT: xorl $15, %eax +; AVX2-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $3, %xmm1, %eax +; AVX2-NEXT: bsrw %ax, %ax +; AVX2-NEXT: xorl $15, %eax +; AVX2-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $4, %xmm1, %eax +; AVX2-NEXT: bsrw %ax, %ax +; AVX2-NEXT: xorl $15, %eax +; AVX2-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $5, %xmm1, %eax +; AVX2-NEXT: bsrw %ax, %ax +; AVX2-NEXT: xorl $15, %eax +; AVX2-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $6, %xmm1, %eax +; AVX2-NEXT: bsrw %ax, %ax +; AVX2-NEXT: xorl $15, %eax +; AVX2-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $7, %xmm1, %eax +; AVX2-NEXT: bsrw %ax, %ax +; AVX2-NEXT: xorl $15, %eax +; AVX2-NEXT: vpinsrw $7, %eax, %xmm2, %xmm1 +; AVX2-NEXT: vpextrw $1, %xmm0, %eax +; AVX2-NEXT: bsrw %ax, %ax +; AVX2-NEXT: xorl $15, %eax +; AVX2-NEXT: vmovd %xmm0, %ecx +; AVX2-NEXT: bsrw %cx, %cx +; AVX2-NEXT: xorl $15, %ecx +; AVX2-NEXT: vmovd %ecx, %xmm2 +; AVX2-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $2, %xmm0, %eax +; AVX2-NEXT: bsrw %ax, %ax +; AVX2-NEXT: xorl $15, %eax +; AVX2-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $3, %xmm0, %eax +; AVX2-NEXT: bsrw %ax, %ax +; AVX2-NEXT: xorl 
$15, %eax +; AVX2-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $4, %xmm0, %eax +; AVX2-NEXT: bsrw %ax, %ax +; AVX2-NEXT: xorl $15, %eax +; AVX2-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $5, %xmm0, %eax +; AVX2-NEXT: bsrw %ax, %ax +; AVX2-NEXT: xorl $15, %eax +; AVX2-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $6, %xmm0, %eax +; AVX2-NEXT: bsrw %ax, %ax +; AVX2-NEXT: xorl $15, %eax +; AVX2-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $7, %xmm0, %eax +; AVX2-NEXT: bsrw %ax, %ax +; AVX2-NEXT: xorl $15, %eax +; AVX2-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0 +; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: retq + %out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %in, i1 -1) + ret <16 x i16> %out +} + +define <32 x i8> @testv32i8(<32 x i8> %in) { +; AVX1-LABEL: testv32i8: +; AVX1: # BB#0: +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vpextrb $1, %xmm1, %eax +; AVX1-NEXT: bsrl %eax, %ecx +; AVX1-NEXT: movl $15, %eax +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpextrb $0, %xmm1, %edx +; AVX1-NEXT: bsrl %edx, %edx +; AVX1-NEXT: cmovel %eax, %edx +; AVX1-NEXT: xorl $7, %edx +; AVX1-NEXT: vmovd %edx, %xmm2 +; AVX1-NEXT: vpinsrb $1, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $2, %xmm1, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $2, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $3, %xmm1, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $3, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $4, %xmm1, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $4, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $5, %xmm1, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $5, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $6, %xmm1, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $6, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $7, %xmm1, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $7, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $8, %xmm1, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $8, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $9, %xmm1, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $9, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $10, %xmm1, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $10, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $11, %xmm1, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $11, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $12, %xmm1, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $12, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $13, %xmm1, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $13, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $14, %xmm1, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $14, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb 
$15, %xmm1, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $15, %ecx, %xmm2, %xmm1 +; AVX1-NEXT: vpextrb $1, %xmm0, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpextrb $0, %xmm0, %edx +; AVX1-NEXT: bsrl %edx, %edx +; AVX1-NEXT: cmovel %eax, %edx +; AVX1-NEXT: xorl $7, %edx +; AVX1-NEXT: vmovd %edx, %xmm2 +; AVX1-NEXT: vpinsrb $1, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $2, %xmm0, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $2, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $3, %xmm0, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $3, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $4, %xmm0, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $4, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $5, %xmm0, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $5, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $6, %xmm0, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $6, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $7, %xmm0, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $7, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $8, %xmm0, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $8, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $9, %xmm0, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $9, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $10, %xmm0, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $10, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $11, %xmm0, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $11, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $12, %xmm0, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $12, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $13, %xmm0, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $13, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $14, %xmm0, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $14, %ecx, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $15, %xmm0, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: cmovel %eax, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vpinsrb $15, %ecx, %xmm2, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: testv32i8: +; AVX2: # BB#0: +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpextrb $1, %xmm1, %eax +; AVX2-NEXT: bsrl %eax, %ecx +; AVX2-NEXT: movl $15, %eax +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpextrb $0, %xmm1, %edx +; AVX2-NEXT: bsrl %edx, %edx +; AVX2-NEXT: cmovel %eax, %edx +; AVX2-NEXT: xorl $7, %edx +; AVX2-NEXT: vmovd %edx, %xmm2 +; AVX2-NEXT: vpinsrb $1, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $2, %xmm1, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: 
xorl $7, %ecx +; AVX2-NEXT: vpinsrb $2, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $3, %xmm1, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $3, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $4, %xmm1, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $4, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $5, %xmm1, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $5, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $6, %xmm1, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $6, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $7, %xmm1, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $7, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $8, %xmm1, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $8, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $9, %xmm1, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $9, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $10, %xmm1, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $10, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $11, %xmm1, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $11, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $12, %xmm1, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $12, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $13, %xmm1, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $13, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $14, %xmm1, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $14, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $15, %xmm1, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $15, %ecx, %xmm2, %xmm1 +; AVX2-NEXT: vpextrb $1, %xmm0, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpextrb $0, %xmm0, %edx +; AVX2-NEXT: bsrl %edx, %edx +; AVX2-NEXT: cmovel %eax, %edx +; AVX2-NEXT: xorl $7, %edx +; AVX2-NEXT: vmovd %edx, %xmm2 +; AVX2-NEXT: vpinsrb $1, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $2, %xmm0, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $2, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $3, %xmm0, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $3, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $4, %xmm0, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $4, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $5, %xmm0, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $5, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $6, %xmm0, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $6, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $7, %xmm0, 
%ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $7, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $8, %xmm0, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $8, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $9, %xmm0, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $9, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $10, %xmm0, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $10, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $11, %xmm0, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $11, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $12, %xmm0, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $12, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $13, %xmm0, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $13, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $14, %xmm0, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $14, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $15, %xmm0, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: cmovel %eax, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vpinsrb $15, %ecx, %xmm2, %xmm0 +; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: retq + %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 0) + ret <32 x i8> %out +} + +define <32 x i8> @testv32i8u(<32 x i8> %in) { +; AVX1-LABEL: testv32i8u: +; AVX1: # BB#0: +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vpextrb $1, %xmm1, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpextrb $0, %xmm1, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vmovd %ecx, %xmm2 +; AVX1-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $2, %xmm1, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $3, %xmm1, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $4, %xmm1, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $5, %xmm1, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $6, %xmm1, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $7, %xmm1, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $8, %xmm1, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $9, %xmm1, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $10, %xmm1, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $11, %xmm1, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $12, %xmm1, %eax +; 
AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $13, %xmm1, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $14, %xmm1, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $15, %xmm1, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1 +; AVX1-NEXT: vpextrb $1, %xmm0, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpextrb $0, %xmm0, %ecx +; AVX1-NEXT: bsrl %ecx, %ecx +; AVX1-NEXT: xorl $7, %ecx +; AVX1-NEXT: vmovd %ecx, %xmm2 +; AVX1-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $2, %xmm0, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $3, %xmm0, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $4, %xmm0, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $5, %xmm0, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $6, %xmm0, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $7, %xmm0, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $8, %xmm0, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $9, %xmm0, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $10, %xmm0, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $11, %xmm0, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $12, %xmm0, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $13, %xmm0, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $14, %xmm0, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2 +; AVX1-NEXT: vpextrb $15, %xmm0, %eax +; AVX1-NEXT: bsrl %eax, %eax +; AVX1-NEXT: xorl $7, %eax +; AVX1-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: testv32i8u: +; AVX2: # BB#0: +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpextrb $1, %xmm1, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpextrb $0, %xmm1, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vmovd %ecx, %xmm2 +; AVX2-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $2, %xmm1, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $3, %xmm1, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $4, %xmm1, %eax +; AVX2-NEXT: bsrl 
%eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $5, %xmm1, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $6, %xmm1, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $7, %xmm1, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $8, %xmm1, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $9, %xmm1, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $10, %xmm1, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $11, %xmm1, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $12, %xmm1, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $13, %xmm1, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $14, %xmm1, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $15, %xmm1, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1 +; AVX2-NEXT: vpextrb $1, %xmm0, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpextrb $0, %xmm0, %ecx +; AVX2-NEXT: bsrl %ecx, %ecx +; AVX2-NEXT: xorl $7, %ecx +; AVX2-NEXT: vmovd %ecx, %xmm2 +; AVX2-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $2, %xmm0, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $3, %xmm0, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $4, %xmm0, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $5, %xmm0, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $6, %xmm0, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $7, %xmm0, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $8, %xmm0, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $9, %xmm0, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $10, %xmm0, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $11, %xmm0, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $12, %xmm0, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $13, %xmm0, %eax +; AVX2-NEXT: bsrl %eax, %eax +; 
AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $14, %xmm0, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $15, %xmm0, %eax +; AVX2-NEXT: bsrl %eax, %eax +; AVX2-NEXT: xorl $7, %eax +; AVX2-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0 +; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: retq + %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 -1) + ret <32 x i8> %out +} + define <4 x i64> @foldv4i64() { ; AVX-LABEL: foldv4i64: ; AVX: # BB#0: -- 2.7.4
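
For reference, the trailing i1 argument on @llvm.ctlz in these tests is the intrinsic's is_zero_undef flag. The *u variants pass i1 -1 (a zero element gives an undefined result), which is why their expansions use a bare bsr; the i1 0 variants must return the bit width for zero elements, so each bsr is guarded with a cmov (e.g. movl $127 / cmoveq / xorq $63 yields the defined ctlz(0) = 64 in the v2i64 case, since bsr leaves its destination undefined and sets ZF on zero input). A minimal standalone sketch of the two patterns, not part of the patch itself; the function names are illustrative only, assuming an x86-64 target without LZCNT:

declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1)

; is_zero_undef = false: ctlz(0) must be 64, so lowering guards each bsr
; with a cmov against the zero case.
define <2 x i64> @ctlz_defined(<2 x i64> %v) {
  %r = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %v, i1 0)
  ret <2 x i64> %r
}

; is_zero_undef = true: the result for a zero element is undefined, so the
; cmov guard is dropped and only bsr/xor remain.
define <2 x i64> @ctlz_undef_on_zero(<2 x i64> %v) {
  %r = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %v, i1 -1)
  ret <2 x i64> %r
}

Running llc -mtriple=x86_64-unknown-unknown on this module should reproduce the guarded and unguarded bsr sequences checked above.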