[SVE][VLS] Don't combine logical AND.
author     Francesco Petrogalli <francesco.petrogalli@arm.com>
           Fri, 31 Jul 2020 20:19:23 +0000 (21:19 +0100)
committer  Francesco Petrogalli <francesco.petrogalli@arm.com>
           Wed, 12 Aug 2020 19:00:07 +0000 (20:00 +0100)
The code in performANDCombine that folds a logical AND of a constant build
vector works only for NEON; bail out early for fixed-length vectors wider
than 128 bits so that SVE-backed vectors keep a plain vector AND.

Testing is performed when targeting 128-, 256- and 512-bit wide vectors.

For 128-bit vectors, the original behavior of using NEON instructions is
preserved.
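
For context, the new early exit reads as the standalone predicate below.
This is a minimal sketch, assuming hypothetical SimpleVT and
canRunNeonAndCombine stand-ins for LLVM's EVT queries (isScalableVector,
is64BitVector, is128BitVector); it shows the shape of the check, not the
actual API.

    // Sketch: the build-vector AND combine may only run for NEON-sized
    // fixed-length vectors (64-bit D registers or 128-bit Q registers).
    struct SimpleVT {
      unsigned SizeInBits; // total width of the vector type
      bool IsScalable;     // true for scalable (SVE) vector types
    };

    static bool canRunNeonAndCombine(SimpleVT VT) {
      if (VT.IsScalable)
        return false; // scalable vectors go to performSVEAndCombine instead
      return VT.SizeInBits == 64 || VT.SizeInBits == 128;
    }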

Differential Revision: https://reviews.llvm.org/D85479

llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/test/CodeGen/AArch64/sve-fix-length-and-combine-512.ll [new file with mode: 0644]

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 67ae6cc..b9da2cf 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -11156,6 +11156,11 @@ static SDValue performANDCombine(SDNode *N,
   if (VT.isScalableVector())
     return performSVEAndCombine(N, DCI);
 
+  // The combining code below works only for NEON vectors. In particular, it
+  // does not work for SVE when dealing with vectors wider than 128 bits.
+  if (!(VT.is64BitVector() || VT.is128BitVector()))
+    return SDValue();
+
   BuildVectorSDNode *BVN =
       dyn_cast<BuildVectorSDNode>(N->getOperand(1).getNode());
   if (!BVN)
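
The guarded code matches an AND whose second operand is a constant build
vector and rewrites it as a NEON BIC with an encoded immediate, an
instruction form that exists only for 64- and 128-bit vector registers.
As a minimal sketch of the idea, using a hypothetical helper name (the
in-tree checks live in the AdvSIMD modified-immediate helpers), the
halfword case looks roughly like:

    #include <cstdint>

    // Return true when an AND of every 16-bit lane with AndMask can be
    // re-expressed as "bic v.8h, #Imm, lsl #Shift": the bits to clear
    // must fit in a single byte of the halfword.
    static bool isBicHalfwordMask(uint16_t AndMask, uint8_t &Imm,
                                  unsigned &Shift) {
      uint16_t Cleared = static_cast<uint16_t>(~AndMask); // bits BIC clears
      if (Cleared == 0)
        return false; // nothing to clear; a BIC would be a no-op
      if ((Cleared & 0xFF00u) == 0) { // immediate sits in the low byte
        Imm = static_cast<uint8_t>(Cleared);
        Shift = 0;
        return true;
      }
      if ((Cleared & 0x00FFu) == 0) { // immediate sits in the high byte
        Imm = static_cast<uint8_t>(Cleared >> 8);
        Shift = 8;
        return true;
      }
      return false; // mask needs a register operand, not an immediate
    }

In the 128-bit test below the per-halfword AND mask is 0xFF00, so the bits
to clear are 0x00FF and the combine emits "bic v0.8h, #255"; at 512 bits
this path produces no Z-register immediate form, so the constant is loaded
and a plain AND is used instead.
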
diff --git a/llvm/test/CodeGen/AArch64/sve-fix-length-and-combine-512.ll b/llvm/test/CodeGen/AArch64/sve-fix-length-and-combine-512.ll
new file mode 100644
index 0000000..ba65b5f
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-fix-length-and-combine-512.ll
@@ -0,0 +1,37 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=512 -o - -asm-verbose=0 < %s | FileCheck %s
+
+; CHECK-LABEL: vls_sve_and_64xi8:
+; CHECK-NEXT:  adrp    x[[ONE:[0-9]+]], .LCPI0_0
+; CHECK-NEXT:  ptrue   p0.b, vl64
+; CHECK-NEXT:  add     x[[TWO:[0-9]+]], x[[ONE]], :lo12:.LCPI0_0
+; CHECK-NEXT:  ld1b    { z0.b }, p0/z, [x0]
+; CHECK-NEXT:  ld1b    { z1.b }, p0/z, [x[[TWO]]]
+; CHECK-NEXT:  and     z0.d, z0.d, z1.d
+; CHECK-NEXT:  st1b    { z0.b }, p0, [x1]
+; CHECK-NEXT:  ret
+define void @vls_sve_and_64xi8(<64 x i8>* %ap, <64 x i8>* %out) nounwind {
+ %a = load <64 x i8>, <64 x i8>* %ap
+ %b = and <64 x i8> %a, <i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255,
+                         i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255,
+                         i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255,
+                         i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255>
+ store <64 x i8> %b, <64 x i8>* %out
+ ret void
+}
+
+; CHECK-LABEL: vls_sve_and_16xi8:
+; CHECK-NEXT:  bic     v0.8h, #255
+; CHECK-NEXT:  ret
+define <16 x i8> @vls_sve_and_16xi8(<16 x i8> %b, <16 x i8>* %out) nounwind {
+ %c = and <16 x i8> %b, <i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255>
+ ret <16 x i8> %c
+}
+
+; CHECK-LABEL: vls_sve_and_8xi8:
+; CHECK-NEXT:  bic     v0.4h, #255
+; CHECK-NEXT:  ret
+define <8 x i8> @vls_sve_and_8xi8(<8 x i8> %b, <8 x i8>* %out) nounwind {
+ %c = and <8 x i8> %b, <i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255>
+ ret <8 x i8> %c
+}
+