[AArch64][GlobalISel] Make <2 x s64> G_ASHR and G_LSHR legal.
author Amara Emerson <aemerson@apple.com>
Sat, 21 Sep 2019 09:21:10 +0000 (09:21 +0000)
committer Amara Emerson <aemerson@apple.com>
Sat, 21 Sep 2019 09:21:10 +0000 (09:21 +0000)
llvm-svn: 372465

llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp
llvm/test/CodeGen/AArch64/GlobalISel/legalize-vector-shift.mir [new file with mode: 0644]

index 2fd5e0d..7ff7902 100644
@@ -124,8 +124,12 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) {
         return !SrcTy.isVector() && SrcTy.getSizeInBits() == 32 &&
                AmtTy.getSizeInBits() == 32;
       })
-      .legalFor(
-          {{s32, s32}, {s32, s64}, {s64, s64}, {v2s32, v2s32}, {v4s32, v4s32}})
+      .legalFor({{s32, s32},
+                 {s32, s64},
+                 {s64, s64},
+                 {v2s32, v2s32},
+                 {v4s32, v4s32},
+                 {v2s64, v2s64}})
       .clampScalar(1, s32, s64)
       .clampScalar(0, s32, s64)
       .minScalarSameAs(1, 0);
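
For readers unfamiliar with the legalizer's rule sets: the builder above is order-sensitive. The customIf predicate is consulted first (intercepting 32-bit scalar shifts whose amount is also 32 bits), then the legalFor list accepts the listed type pairs unchanged, and the clampScalar/minScalarSameAs steps handle everything else. The following standalone sketch models that classification; the Ty struct, Action enum, and classifyShift function are illustrative inventions, not LLVM's actual LegalizerInfo API.

  // Minimal model of how the shift rules above classify a
  // (destination type, amount type) pair. Illustrative only.
  #include <cstdio>
  #include <utility>

  struct Ty {
    unsigned Elts;       // 1 for scalars
    unsigned ScalarBits;
    bool isVector() const { return Elts > 1; }
    unsigned sizeInBits() const { return Elts * ScalarBits; }
    bool operator==(const Ty &O) const {
      return Elts == O.Elts && ScalarBits == O.ScalarBits;
    }
  };

  enum class Action { Custom, Legal, Lower };

  // Rules fire in declaration order: customIf first, then the legalFor
  // list, then the clampScalar/minScalarSameAs fallthrough (summarized
  // here as Lower).
  Action classifyShift(Ty Dst, Ty Amt) {
    // customIf: a 32-bit scalar shifted by a 32-bit amount is custom-lowered.
    if (!Dst.isVector() && Dst.sizeInBits() == 32 && Amt.sizeInBits() == 32)
      return Action::Custom;
    const Ty s32{1, 32}, s64{1, 64}, v2s32{2, 32}, v4s32{4, 32}, v2s64{2, 64};
    const std::pair<Ty, Ty> LegalPairs[] = {
        {s32, s32},     {s32, s64},     {s64, s64},
        {v2s32, v2s32}, {v4s32, v4s32}, {v2s64, v2s64}}; // v2s64 is new here
    for (const auto &P : LegalPairs)
      if (P.first == Dst && P.second == Amt)
        return Action::Legal;
    return Action::Lower;
  }

  int main() {
    Ty v2s64{2, 64};
    // <2 x s64> shifted by a <2 x s64> amount is now accepted unchanged.
    std::printf("v2s64 legal: %d\n",
                classifyShift(v2s64, v2s64) == Action::Legal);
    return 0;
  }

Note that because customIf is checked before legalFor, the {s32, s32} entry is effectively shadowed: 32-bit scalar shifts with 32-bit amounts still take the custom path.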
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vector-shift.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vector-shift.mir
new file mode 100644
index 0000000..24c551c
--- /dev/null
@@ -0,0 +1,78 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -O0 -march=aarch64 -run-pass=legalizer -global-isel-abort=1 %s -o - | FileCheck %s
+---
+name:            lshr_v4s32
+body:             |
+  bb.1:
+    liveins: $q0, $q1
+
+    ; CHECK-LABEL: name: lshr_v4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[COPY1]](<4 x s32>)
+    ; CHECK: $q0 = COPY [[LSHR]](<4 x s32>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %0:_(<4 x s32>) = COPY $q0
+    %1:_(<4 x s32>) = COPY $q1
+    %2:_(<4 x s32>) = G_LSHR %0, %1(<4 x s32>)
+    $q0 = COPY %2(<4 x s32>)
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            lshr_v2s64
+body:             |
+  bb.1:
+    liveins: $q0, $q1
+
+    ; CHECK-LABEL: name: lshr_v2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+    ; CHECK: [[LSHR:%[0-9]+]]:_(<2 x s64>) = G_LSHR [[COPY]], [[COPY1]](<2 x s64>)
+    ; CHECK: $q0 = COPY [[LSHR]](<2 x s64>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %0:_(<2 x s64>) = COPY $q0
+    %1:_(<2 x s64>) = COPY $q1
+    %2:_(<2 x s64>) = G_LSHR %0, %1(<2 x s64>)
+    $q0 = COPY %2(<2 x s64>)
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            ashr_v4s32
+body:             |
+  bb.1:
+    liveins: $q0, $q1
+
+    ; CHECK-LABEL: name: ashr_v4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK: [[ASHR:%[0-9]+]]:_(<4 x s32>) = G_ASHR [[COPY]], [[COPY1]](<4 x s32>)
+    ; CHECK: $q0 = COPY [[ASHR]](<4 x s32>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %0:_(<4 x s32>) = COPY $q0
+    %1:_(<4 x s32>) = COPY $q1
+    %2:_(<4 x s32>) = G_ASHR %0, %1(<4 x s32>)
+    $q0 = COPY %2(<4 x s32>)
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            ashr_v2s64
+body:             |
+  bb.1:
+    liveins: $q0, $q1
+
+    ; CHECK-LABEL: name: ashr_v2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+    ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[COPY]], [[COPY1]](<2 x s64>)
+    ; CHECK: $q0 = COPY [[ASHR]](<2 x s64>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %0:_(<2 x s64>) = COPY $q0
+    %1:_(<2 x s64>) = COPY $q1
+    %2:_(<2 x s64>) = G_ASHR %0, %1(<2 x s64>)
+    $q0 = COPY %2(<2 x s64>)
+    RET_ReallyLR implicit $q0
+
+...
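
A note on the tests above: because every (type, amount) pair they exercise is in the legalFor list after this change, the legalizer leaves each G_LSHR/G_ASHR untouched, so the autogenerated CHECK lines simply mirror the input MIR. If the rules change, the assertions can be regenerated with the script named in the NOTE line; a typical invocation from the monorepo root (path assumed) would be:

  python llvm/utils/update_mir_test_checks.py llvm/test/CodeGen/AArch64/GlobalISel/legalize-vector-shift.mir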