From 7425bdbd2faa9483be71dc5674264540d3cb4b84 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Sat, 9 May 2020 14:41:27 +0100
Subject: [PATCH] [X86] Add test cases for 'abs from mul patterns' (PR45691)

---
 llvm/test/CodeGen/X86/combine-mul.ll | 79 ++++++++++++++++++++++++++++++++++++
 1 file changed, 79 insertions(+)

diff --git a/llvm/test/CodeGen/X86/combine-mul.ll b/llvm/test/CodeGen/X86/combine-mul.ll
index 98fbd37..3282d50 100644
--- a/llvm/test/CodeGen/X86/combine-mul.ll
+++ b/llvm/test/CodeGen/X86/combine-mul.ll
@@ -354,6 +354,85 @@ define <4 x i32> @combine_mul_abs_v4i32(<4 x i32> %0) {
   ret <4 x i32> %m
 }
 
+; TODO fold Y = sra (X, size(X)-1); mul (or (Y, 1), X) -> (abs X)
+
+define <16 x i8> @combine_mul_to_abs_v16i8(<16 x i8> %x) {
+; SSE-LABEL: combine_mul_to_abs_v16i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pxor %xmm2, %xmm2
+; SSE-NEXT:    pcmpgtb %xmm0, %xmm2
+; SSE-NEXT:    por {{.*}}(%rip), %xmm2
+; SSE-NEXT:    pmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE-NEXT:    pmovzxbw {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; SSE-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; SSE-NEXT:    pmullw %xmm0, %xmm2
+; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    pand %xmm0, %xmm2
+; SSE-NEXT:    pmullw %xmm3, %xmm1
+; SSE-NEXT:    pand %xmm0, %xmm1
+; SSE-NEXT:    packuswb %xmm2, %xmm1
+; SSE-NEXT:    movdqa %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_mul_to_abs_v16i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm1
+; AVX-NEXT:    vpor {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX-NEXT:    vpmullw %ymm0, %ymm1, %ymm0
+; AVX-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+  %s = ashr <16 x i8> %x, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+  %o = or <16 x i8> %s, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %m = mul <16 x i8> %o, %x
+  ret <16 x i8> %m
+}
+
+define <2 x i64> @combine_mul_to_abs_v2i64(<2 x i64> %x) {
+; SSE-LABEL: combine_mul_to_abs_v2i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    psrad $31, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE-NEXT:    por {{.*}}(%rip), %xmm1
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    psrlq $32, %xmm2
+; SSE-NEXT:    pmuludq %xmm1, %xmm2
+; SSE-NEXT:    movdqa %xmm1, %xmm3
+; SSE-NEXT:    psrlq $32, %xmm3
+; SSE-NEXT:    pmuludq %xmm0, %xmm3
+; SSE-NEXT:    paddq %xmm2, %xmm3
+; SSE-NEXT:    psllq $32, %xmm3
+; SSE-NEXT:    pmuludq %xmm1, %xmm0
+; SSE-NEXT:    paddq %xmm3, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_mul_to_abs_v2i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm1
+; AVX-NEXT:    vpor {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT:    vpsrlq $32, %xmm0, %xmm2
+; AVX-NEXT:    vpmuludq %xmm1, %xmm2, %xmm2
+; AVX-NEXT:    vpsrlq $32, %xmm1, %xmm3
+; AVX-NEXT:    vpmuludq %xmm3, %xmm0, %xmm3
+; AVX-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
+; AVX-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %s = ashr <2 x i64> %x, <i64 63, i64 63>
+  %o = or <2 x i64> %s, <i64 1, i64 1>
+  %m = mul <2 x i64> %x, %o
+  ret <2 x i64> %m
+}
+
 ; This would infinite loop because DAGCombiner wants to turn this into a shift,
 ; but x86 lowering wants to avoid non-uniform vector shift amounts.
-- 
2.7.4
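
Note for readers (not part of the patch): a minimal scalar sketch of the pattern
the TODO above wants to fold, using a hypothetical function name. For negative
%x, ashr by bitwidth-1 yields all-ones, the or keeps it at -1, and the mul
negates %x; for non-negative %x the shift yields 0, the or yields +1, and the
mul is the identity. The sequence therefore computes abs(%x).

; Illustrative sketch only, not from the patch: scalar i8 form of the
; mul-to-abs pattern.
define i8 @mul_to_abs_i8(i8 %x) {
  %s = ashr i8 %x, 7   ; -1 if %x is negative, 0 otherwise
  %o = or i8 %s, 1     ; -1 if %x is negative, +1 otherwise
  %m = mul i8 %o, %x   ; (+/-1) * %x, i.e. abs(%x), with -128 wrapping to itself
  ret i8 %m
}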