def SVEArithUImmPat : ComplexPattern<i32, 1, "SelectSVEArithImm", []>;
def SVEArithSImmPat : ComplexPattern<i32, 1, "SelectSVESignedArithImm", []>;
-def SVEShiftImm64 : ComplexPattern<i32, 1, "SelectSVEShiftImm64<0, 64>", []>;
+// Shift-amount selectors: SelectSVEShiftImm<Low, High> matches a splatted
+// immediate in the inclusive range [Low, High].  Left shifts (L*) accept
+// 0..(ElemBits-1); right shifts (R*) accept 1..ElemBits.
+// NOTE(review): the trailing 'true' on the R* variants appears to allow
+// clamping over-wide amounts to High (see @asr_i8 below, where a splat of 9
+// selects '#8') -- confirm against the SelectSVEShiftImm implementation.
+def SVEShiftImmL8 : ComplexPattern<i32, 1, "SelectSVEShiftImm<0, 7>", []>;
+def SVEShiftImmL16 : ComplexPattern<i32, 1, "SelectSVEShiftImm<0, 15>", []>;
+def SVEShiftImmL32 : ComplexPattern<i32, 1, "SelectSVEShiftImm<0, 31>", []>;
+def SVEShiftImmL64 : ComplexPattern<i32, 1, "SelectSVEShiftImm<0, 63>", []>;
+def SVEShiftImmR8 : ComplexPattern<i32, 1, "SelectSVEShiftImm<1, 8, true>", []>;
+def SVEShiftImmR16 : ComplexPattern<i32, 1, "SelectSVEShiftImm<1, 16, true>", []>;
+def SVEShiftImmR32 : ComplexPattern<i32, 1, "SelectSVEShiftImm<1, 32, true>", []>;
+def SVEShiftImmR64 : ComplexPattern<i32, 1, "SelectSVEShiftImm<1, 64, true>", []>;
class SVEExactFPImm<string Suffix, string ValA, string ValB> : AsmOperandClass {
let Name = "SVEExactFPImmOperand" # Suffix;
: Pat<(vt (op (vt zprty:$Op1), (vt (AArch64dup (it (cpx i32:$imm, i32:$shift)))))),
(inst $Op1, i32:$imm, i32:$shift)>;
-class SVE_1_Op_Imm_Shift_Pred_Pat<ValueType vt, ValueType pt, SDPatternOperator op,
- ZPRRegOp zprty, Operand ImmTy, Instruction inst>
- : Pat<(vt (op (pt (AArch64ptrue 31)), (vt zprty:$Op1), (vt (AArch64dup (ImmTy:$imm))))),
- (inst $Op1, ImmTy:$imm)>;
-
class SVE_1_Op_Imm_Arith_Pred_Pat<ValueType vt, ValueType pt, SDPatternOperator op,
ZPRRegOp zprty, ValueType it, ComplexPattern cpx, Instruction inst>
: Pat<(vt (op (pt (AArch64ptrue 31)), (vt zprty:$Op1), (vt (AArch64dup (it (cpx i32:$imm)))))),
: Pat<(vt (op pt:$Pg, vt:$Src, inreg_vt, vt:$PassThru)),
(inst $PassThru, $Pg, $Src)>;
+// Matches a predicated shift whose amount is a splatted ("dup") immediate
+// validated by the ComplexPattern 'cast'; the predicate $Pg is forwarded to
+// the destructive predicated immediate-form instruction.
+class SVE_Shift_DupImm_Pred_Pat<ValueType vt, SDPatternOperator op,
+                                ValueType pt, ValueType it,
+                                ComplexPattern cast, Instruction inst>
+: Pat<(vt (op pt:$Pg, vt:$Rn, (vt (AArch64dup (it (cast i32:$imm)))))),
+      (inst $Pg, $Rn, i32:$imm)>;
+
+// As above, but fires only when the predicate is ptrue(31) (all lanes
+// active), so the unpredicated immediate-form instruction can be used.
+class SVE_Shift_DupImm_All_Active_Pat<ValueType vt, SDPatternOperator op,
+                                      ValueType pt, ValueType it,
+                                      ComplexPattern cast, Instruction inst>
+: Pat<(vt (op (pt (AArch64ptrue 31)), vt:$Rn, (vt (AArch64dup (it (cast i32:$imm)))))),
+      (inst $Rn, i32:$imm)>;
+
//
// Pseudo -> Instruction mappings
//
let ElementSize = zprty.ElementSize;
}
-multiclass sve_int_bin_pred_shift_imm_left<bits<4> opc, string asm, string psName=""> {
- def _B : SVEPseudo2Instr<psName # _B, 1>,
+multiclass sve_int_bin_pred_shift_imm_left<bits<4> opc, string asm, string Ps,
+ SDPatternOperator op = null_frag> {
+ def _B : SVEPseudo2Instr<Ps # _B, 1>,
sve_int_bin_pred_shift_imm<{0,0,0,1}, opc, asm, ZPR8, vecshiftL8>;
- def _H : SVEPseudo2Instr<psName # _H, 1>,
- sve_int_bin_pred_shift_imm<{0,0,1,?}, opc, asm, ZPR16, vecshiftL16> {
- let Inst{8} = imm{3};
- }
- def _S : SVEPseudo2Instr<psName # _S, 1>,
- sve_int_bin_pred_shift_imm<{0,1,?,?}, opc, asm, ZPR32, vecshiftL32> {
- let Inst{9-8} = imm{4-3};
- }
- def _D : SVEPseudo2Instr<psName # _D, 1>,
- sve_int_bin_pred_shift_imm<{1,?,?,?}, opc, asm, ZPR64, vecshiftL64> {
- let Inst{22} = imm{5};
- let Inst{9-8} = imm{4-3};
- }
-}
-
-multiclass sve2_int_bin_pred_shift_imm_left<bits<4> opc, string asm,
- string psName,
- SDPatternOperator op> {
-
- def _B : SVEPseudo2Instr<psName # _B, 1>, sve_int_bin_pred_shift_imm<{0,0,0,1}, opc, asm, ZPR8, vecshiftL8>;
- def _H : SVEPseudo2Instr<psName # _H, 1>,
+ def _H : SVEPseudo2Instr<Ps # _H, 1>,
sve_int_bin_pred_shift_imm<{0,0,1,?}, opc, asm, ZPR16, vecshiftL16> {
let Inst{8} = imm{3};
}
- def _S : SVEPseudo2Instr<psName # _S, 1>,
+ def _S : SVEPseudo2Instr<Ps # _S, 1>,
sve_int_bin_pred_shift_imm<{0,1,?,?}, opc, asm, ZPR32, vecshiftL32> {
let Inst{9-8} = imm{4-3};
}
- def _D : SVEPseudo2Instr<psName # _D, 1>,
+ def _D : SVEPseudo2Instr<Ps # _D, 1>,
sve_int_bin_pred_shift_imm<{1,?,?,?}, opc, asm, ZPR64, vecshiftL64> {
let Inst{22} = imm{5};
let Inst{9-8} = imm{4-3};
def : SVE_3_Op_Imm_Pat<nxv2i64, op, nxv2i1, nxv2i64, i32, tvecshiftL64, !cast<Instruction>(NAME # _D)>;
}
+// As above but shift amount takes the form of a "vector immediate".
+// Reuses the base multiclass for the instruction encodings (with no
+// intrinsic patterns) and adds one splatted-immediate pattern per element
+// size.  Note the _D pattern splats an i64, the others an i32.
+multiclass sve_int_bin_pred_shift_imm_left_dup<bits<4> opc, string asm,
+                                               string Ps, SDPatternOperator op>
+: sve_int_bin_pred_shift_imm_left<opc, asm, Ps, null_frag> {
+  def : SVE_Shift_DupImm_Pred_Pat<nxv16i8, op, nxv16i1, i32, SVEShiftImmL8, !cast<Instruction>(NAME # _B)>;
+  def : SVE_Shift_DupImm_Pred_Pat<nxv8i16, op, nxv8i1, i32, SVEShiftImmL16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_Shift_DupImm_Pred_Pat<nxv4i32, op, nxv4i1, i32, SVEShiftImmL32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_Shift_DupImm_Pred_Pat<nxv2i64, op, nxv2i1, i64, SVEShiftImmL64, !cast<Instruction>(NAME # _D)>;
+}
+
multiclass sve_int_bin_pred_shift_imm_left_zeroing_bhsd<SDPatternOperator op> {
def _ZERO_B : PredTwoOpImmPseudo<NAME # _B, ZPR8, tvecshiftL8, FalseLanesZero>;
def _ZERO_H : PredTwoOpImmPseudo<NAME # _H, ZPR16, tvecshiftL16, FalseLanesZero>;
def : SVE_3_Op_Imm_Pat<nxv2i64, op, nxv2i1, nxv2i64, i32, tvecshiftR64, !cast<Instruction>(NAME # _D)>;
}
+// As above but shift amount takes the form of a "vector immediate".
+// Right-shift counterpart of sve_int_bin_pred_shift_imm_left_dup: the
+// SVEShiftImmR* selectors accept 1..ElemBits rather than 0..(ElemBits-1).
+// Note the _D pattern splats an i64, the others an i32.
+multiclass sve_int_bin_pred_shift_imm_right_dup<bits<4> opc, string asm,
+                                                string Ps, SDPatternOperator op>
+: sve_int_bin_pred_shift_imm_right<opc, asm, Ps, null_frag> {
+  def : SVE_Shift_DupImm_Pred_Pat<nxv16i8, op, nxv16i1, i32, SVEShiftImmR8, !cast<Instruction>(NAME # _B)>;
+  def : SVE_Shift_DupImm_Pred_Pat<nxv8i16, op, nxv8i1, i32, SVEShiftImmR16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_Shift_DupImm_Pred_Pat<nxv4i32, op, nxv4i1, i32, SVEShiftImmR32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_Shift_DupImm_Pred_Pat<nxv2i64, op, nxv2i1, i64, SVEShiftImmR64, !cast<Instruction>(NAME # _D)>;
+}
+
multiclass sve_int_bin_pred_shift_imm_right_zeroing_bhsd<SDPatternOperator op = null_frag> {
def _ZERO_B : PredTwoOpImmPseudo<NAME # _B, ZPR8, vecshiftR8, FalseLanesZero>;
def _ZERO_H : PredTwoOpImmPseudo<NAME # _H, ZPR16, vecshiftR16, FalseLanesZero>;
let Inst{20-19} = imm{4-3};
}
- def : SVE_1_Op_Imm_Shift_Pred_Pat<nxv16i8, nxv16i1, op, ZPR8, vecshiftL8, !cast<Instruction>(NAME # _B)>;
- def : SVE_1_Op_Imm_Shift_Pred_Pat<nxv8i16, nxv8i1, op, ZPR16, vecshiftL16, !cast<Instruction>(NAME # _H)>;
- def : SVE_1_Op_Imm_Shift_Pred_Pat<nxv4i32, nxv4i1, op, ZPR32, vecshiftL32, !cast<Instruction>(NAME # _S)>;
- def : SVE_1_Op_Imm_Arith_Pred_Pat<nxv2i64, nxv2i1, op, ZPR64, i64, SVEShiftImm64, !cast<Instruction>(NAME # _D)>;
+ def : SVE_Shift_DupImm_All_Active_Pat<nxv16i8, op, nxv16i1, i32, SVEShiftImmL8, !cast<Instruction>(NAME # _B)>;
+ def : SVE_Shift_DupImm_All_Active_Pat<nxv8i16, op, nxv8i1, i32, SVEShiftImmL16, !cast<Instruction>(NAME # _H)>;
+ def : SVE_Shift_DupImm_All_Active_Pat<nxv4i32, op, nxv4i1, i32, SVEShiftImmL32, !cast<Instruction>(NAME # _S)>;
+ def : SVE_Shift_DupImm_All_Active_Pat<nxv2i64, op, nxv2i1, i64, SVEShiftImmL64, !cast<Instruction>(NAME # _D)>;
}
multiclass sve_int_bin_cons_shift_imm_right<bits<2> opc, string asm,
let Inst{20-19} = imm{4-3};
}
- def : SVE_1_Op_Imm_Shift_Pred_Pat<nxv16i8, nxv16i1, op, ZPR8, vecshiftR8, !cast<Instruction>(NAME # _B)>;
- def : SVE_1_Op_Imm_Shift_Pred_Pat<nxv8i16, nxv8i1, op, ZPR16, vecshiftR16, !cast<Instruction>(NAME # _H)>;
- def : SVE_1_Op_Imm_Shift_Pred_Pat<nxv4i32, nxv4i1, op, ZPR32, vecshiftR32, !cast<Instruction>(NAME # _S)>;
- def : SVE_1_Op_Imm_Arith_Pred_Pat<nxv2i64, nxv2i1, op, ZPR64, i64, SVEShiftImm64, !cast<Instruction>(NAME # _D)>;
+ def : SVE_Shift_DupImm_All_Active_Pat<nxv16i8, op, nxv16i1, i32, SVEShiftImmR8, !cast<Instruction>(NAME # _B)>;
+ def : SVE_Shift_DupImm_All_Active_Pat<nxv8i16, op, nxv8i1, i32, SVEShiftImmR16, !cast<Instruction>(NAME # _H)>;
+ def : SVE_Shift_DupImm_All_Active_Pat<nxv4i32, op, nxv4i1, i32, SVEShiftImmR32, !cast<Instruction>(NAME # _S)>;
+ def : SVE_Shift_DupImm_All_Active_Pat<nxv2i64, op, nxv2i1, i64, SVEShiftImmR64, !cast<Instruction>(NAME # _D)>;
}
//===----------------------------------------------------------------------===//
// SVE Memory - Store Group
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s
; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t
define <vscale x 16 x i8> @smax_i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: smax_i8:
-; CHECK: smax z0.b, z0.b, #-128
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: smax z0.b, z0.b, #-128
+; CHECK-NEXT: ret
%pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
%elt = insertelement <vscale x 16 x i8> undef, i8 -128, i32 0
%splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
define <vscale x 8 x i16> @smax_i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: smax_i16:
-; CHECK: smax z0.h, z0.h, #127
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: smax z0.h, z0.h, #127
+; CHECK-NEXT: ret
%pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
%elt = insertelement <vscale x 8 x i16> undef, i16 127, i32 0
%splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 4 x i32> @smax_i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: smax_i32:
-; CHECK: smax z0.s, z0.s, #-128
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: smax z0.s, z0.s, #-128
+; CHECK-NEXT: ret
%pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
%elt = insertelement <vscale x 4 x i32> undef, i32 -128, i32 0
%splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
define <vscale x 2 x i64> @smax_i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: smax_i64:
-; CHECK: smax z0.d, z0.d, #127
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: smax z0.d, z0.d, #127
+; CHECK-NEXT: ret
%pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
%elt = insertelement <vscale x 2 x i64> undef, i64 127, i64 0
%splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
define <vscale x 16 x i8> @smin_i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: smin_i8:
-; CHECK: smin z0.b, z0.b, #127
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: smin z0.b, z0.b, #127
+; CHECK-NEXT: ret
%pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
%elt = insertelement <vscale x 16 x i8> undef, i8 127, i32 0
%splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
define <vscale x 8 x i16> @smin_i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: smin_i16:
-; CHECK: smin z0.h, z0.h, #-128
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: smin z0.h, z0.h, #-128
+; CHECK-NEXT: ret
%pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
%elt = insertelement <vscale x 8 x i16> undef, i16 -128, i32 0
%splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 4 x i32> @smin_i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: smin_i32:
-; CHECK: smin z0.s, z0.s, #127
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: smin z0.s, z0.s, #127
+; CHECK-NEXT: ret
%pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
%elt = insertelement <vscale x 4 x i32> undef, i32 127, i32 0
%splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
define <vscale x 2 x i64> @smin_i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: smin_i64:
-; CHECK: smin z0.d, z0.d, #-128
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: smin z0.d, z0.d, #-128
+; CHECK-NEXT: ret
%pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
%elt = insertelement <vscale x 2 x i64> undef, i64 -128, i64 0
%splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
define <vscale x 16 x i8> @umax_i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: umax_i8:
-; CHECK: umax z0.b, z0.b, #0
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: umax z0.b, z0.b, #0
+; CHECK-NEXT: ret
%pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
%elt = insertelement <vscale x 16 x i8> undef, i8 0, i32 0
%splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
define <vscale x 8 x i16> @umax_i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: umax_i16:
-; CHECK: umax z0.h, z0.h, #255
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: umax z0.h, z0.h, #255
+; CHECK-NEXT: ret
%pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
%elt = insertelement <vscale x 8 x i16> undef, i16 255, i32 0
%splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 4 x i32> @umax_i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: umax_i32:
-; CHECK: umax z0.s, z0.s, #0
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: umax z0.s, z0.s, #0
+; CHECK-NEXT: ret
%pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
%elt = insertelement <vscale x 4 x i32> undef, i32 0, i32 0
%splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
define <vscale x 2 x i64> @umax_i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: umax_i64:
-; CHECK: umax z0.d, z0.d, #255
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: umax z0.d, z0.d, #255
+; CHECK-NEXT: ret
%pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
%elt = insertelement <vscale x 2 x i64> undef, i64 255, i64 0
%splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
define <vscale x 16 x i8> @umin_i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: umin_i8:
-; CHECK: umin z0.b, z0.b, #255
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: umin z0.b, z0.b, #255
+; CHECK-NEXT: ret
%pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
%elt = insertelement <vscale x 16 x i8> undef, i8 255, i32 0
%splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
define <vscale x 8 x i16> @umin_i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: umin_i16:
-; CHECK: umin z0.h, z0.h, #0
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: umin z0.h, z0.h, #0
+; CHECK-NEXT: ret
%pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
%elt = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
%splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 4 x i32> @umin_i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: umin_i32:
-; CHECK: umin z0.s, z0.s, #255
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: umin z0.s, z0.s, #255
+; CHECK-NEXT: ret
%pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
%elt = insertelement <vscale x 4 x i32> undef, i32 255, i32 0
%splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
define <vscale x 2 x i64> @umin_i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: umin_i64:
-; CHECK: umin z0.d, z0.d, #0
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: umin z0.d, z0.d, #0
+; CHECK-NEXT: ret
%pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
%elt = insertelement <vscale x 2 x i64> undef, i64 0, i64 0
%splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
define <vscale x 16 x i8> @sqadd_b_lowimm(<vscale x 16 x i8> %a) {
; CHECK-LABEL: sqadd_b_lowimm:
-; CHECK: sqadd z0.b, z0.b, #27
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: sqadd z0.b, z0.b, #27 // =0x1b
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
%splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8> %a,
define <vscale x 8 x i16> @sqadd_h_lowimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: sqadd_h_lowimm:
-; CHECK: sqadd z0.h, z0.h, #43
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: sqadd z0.h, z0.h, #43 // =0x2b
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
%splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a,
define <vscale x 8 x i16> @sqadd_h_highimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: sqadd_h_highimm:
-; CHECK: sqadd z0.h, z0.h, #2048
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: sqadd z0.h, z0.h, #2048 // =0x800
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
%splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a,
define <vscale x 4 x i32> @sqadd_s_lowimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: sqadd_s_lowimm:
-; CHECK: sqadd z0.s, z0.s, #1
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: sqadd z0.s, z0.s, #1 // =0x1
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
%splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a,
define <vscale x 4 x i32> @sqadd_s_highimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: sqadd_s_highimm:
-; CHECK: sqadd z0.s, z0.s, #8192
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: sqadd z0.s, z0.s, #8192 // =0x2000
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
%splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a,
define <vscale x 2 x i64> @sqadd_d_lowimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: sqadd_d_lowimm:
-; CHECK: sqadd z0.d, z0.d, #255
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: sqadd z0.d, z0.d, #255 // =0xff
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
%splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a,
define <vscale x 2 x i64> @sqadd_d_highimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: sqadd_d_highimm:
-; CHECK: sqadd z0.d, z0.d, #65280
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: sqadd z0.d, z0.d, #65280 // =0xff00
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
%splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a,
define <vscale x 16 x i8> @sqsub_b_lowimm(<vscale x 16 x i8> %a) {
; CHECK-LABEL: sqsub_b_lowimm:
-; CHECK: sqsub z0.b, z0.b, #27
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: sqsub z0.b, z0.b, #27 // =0x1b
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
%splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8> %a,
define <vscale x 8 x i16> @sqsub_h_lowimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: sqsub_h_lowimm:
-; CHECK: sqsub z0.h, z0.h, #43
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: sqsub z0.h, z0.h, #43 // =0x2b
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
%splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a,
define <vscale x 8 x i16> @sqsub_h_highimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: sqsub_h_highimm:
-; CHECK: sqsub z0.h, z0.h, #2048
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: sqsub z0.h, z0.h, #2048 // =0x800
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
%splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a,
define <vscale x 4 x i32> @sqsub_s_lowimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: sqsub_s_lowimm:
-; CHECK: sqsub z0.s, z0.s, #1
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: sqsub z0.s, z0.s, #1 // =0x1
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
%splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a,
define <vscale x 4 x i32> @sqsub_s_highimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: sqsub_s_highimm:
-; CHECK: sqsub z0.s, z0.s, #8192
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: sqsub z0.s, z0.s, #8192 // =0x2000
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
%splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a,
define <vscale x 2 x i64> @sqsub_d_lowimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: sqsub_d_lowimm:
-; CHECK: sqsub z0.d, z0.d, #255
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: sqsub z0.d, z0.d, #255 // =0xff
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
%splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a,
define <vscale x 2 x i64> @sqsub_d_highimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: sqsub_d_highimm:
-; CHECK: sqsub z0.d, z0.d, #65280
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: sqsub z0.d, z0.d, #65280 // =0xff00
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
%splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a,
define <vscale x 16 x i8> @uqadd_b_lowimm(<vscale x 16 x i8> %a) {
; CHECK-LABEL: uqadd_b_lowimm:
-; CHECK: uqadd z0.b, z0.b, #27
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: uqadd z0.b, z0.b, #27 // =0x1b
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
%splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8> %a,
define <vscale x 8 x i16> @uqadd_h_lowimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: uqadd_h_lowimm:
-; CHECK: uqadd z0.h, z0.h, #43
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: uqadd z0.h, z0.h, #43 // =0x2b
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
%splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %a,
define <vscale x 8 x i16> @uqadd_h_highimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: uqadd_h_highimm:
-; CHECK: uqadd z0.h, z0.h, #2048
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: uqadd z0.h, z0.h, #2048 // =0x800
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
%splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %a,
define <vscale x 4 x i32> @uqadd_s_lowimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: uqadd_s_lowimm:
-; CHECK: uqadd z0.s, z0.s, #1
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: uqadd z0.s, z0.s, #1 // =0x1
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
%splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %a,
define <vscale x 16 x i8> @uqsub_b_lowimm(<vscale x 16 x i8> %a) {
; CHECK-LABEL: uqsub_b_lowimm:
-; CHECK: uqsub z0.b, z0.b, #27
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: uqsub z0.b, z0.b, #27 // =0x1b
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
%splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8> %a,
define <vscale x 8 x i16> @uqsub_h_lowimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: uqsub_h_lowimm:
-; CHECK: uqsub z0.h, z0.h, #43
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: uqsub z0.h, z0.h, #43 // =0x2b
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
%splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a,
define <vscale x 8 x i16> @uqsub_h_highimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: uqsub_h_highimm:
-; CHECK: uqsub z0.h, z0.h, #2048
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: uqsub z0.h, z0.h, #2048 // =0x800
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
%splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a,
define <vscale x 4 x i32> @uqsub_s_lowimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: uqsub_s_lowimm:
-; CHECK: uqsub z0.s, z0.s, #1
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: uqsub z0.s, z0.s, #1 // =0x1
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
%splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a,
define <vscale x 4 x i32> @uqsub_s_highimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: uqsub_s_highimm:
-; CHECK: uqsub z0.s, z0.s, #8192
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: uqsub z0.s, z0.s, #8192 // =0x2000
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
%splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a,
define <vscale x 2 x i64> @uqsub_d_lowimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: uqsub_d_lowimm:
-; CHECK: uqsub z0.d, z0.d, #255
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: uqsub z0.d, z0.d, #255 // =0xff
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
%splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a,
define <vscale x 2 x i64> @uqsub_d_highimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: uqsub_d_highimm:
-; CHECK: uqsub z0.d, z0.d, #65280
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: uqsub z0.d, z0.d, #65280 // =0xff00
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
%splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a,
define <vscale x 4 x i32> @uqadd_s_highimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: uqadd_s_highimm:
-; CHECK: uqadd z0.s, z0.s, #8192
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: uqadd z0.s, z0.s, #8192 // =0x2000
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
%splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %a,
define <vscale x 2 x i64> @uqadd_d_lowimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: uqadd_d_lowimm:
-; CHECK: uqadd z0.d, z0.d, #255
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: uqadd z0.d, z0.d, #255 // =0xff
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
%splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %a,
define <vscale x 2 x i64> @uqadd_d_highimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: uqadd_d_highimm:
-; CHECK: uqadd z0.d, z0.d, #65280
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: uqadd z0.d, z0.d, #65280 // =0xff00
+; CHECK-NEXT: ret
%elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
%splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %a,
; ASR
-define <vscale x 16 x i8> @asr_i8(<vscale x 16 x i8> %a) {
+; The splatted shift amount (9) exceeds the maximum i8 right-shift immediate
+; (8); the test expects it to be clamped so the immediate form is selected.
+define <vscale x 16 x i8> @asr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: asr_i8:
-; CHECK: asr z0.b, z0.b, #8
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: asr z0.b, p0/m, z0.b, #8
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 9, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg,
+                                                               <vscale x 16 x i8> %a,
+                                                               <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 16 x i8> @asr_i8_all_active(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: asr_i8_all_active:
+; CHECK: // %bb.0:
+; CHECK-NEXT: asr z0.b, z0.b, #8
+; CHECK-NEXT: ret
%pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
%elt = insertelement <vscale x 16 x i8> undef, i8 8, i32 0
%splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
ret <vscale x 16 x i8> %out
}
-define <vscale x 8 x i16> @asr_i16(<vscale x 8 x i16> %a) {
+; Ensure we don't match a right shift by zero to the immediate form.
+define <vscale x 16 x i8> @asr_i8_too_small(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: asr_i8_too_small:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z1.b, #0 // =0x0
+; CHECK-NEXT: asr z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: ret
+ %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg,
+ <vscale x 16 x i8> %a,
+ <vscale x 16 x i8> zeroinitializer)
+ ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @asr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: asr_i16:
-; CHECK: asr z0.h, z0.h, #16
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: asr z0.h, p0/m, z0.h, #16
+; CHECK-NEXT: ret
+ %elt = insertelement <vscale x 8 x i16> undef, i16 17, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %pg,
+ <vscale x 8 x i16> %a,
+ <vscale x 8 x i16> %splat)
+ ret <vscale x 8 x i16> %out
+}
+
+; An all-active predicate (ptrue 31) allows the unpredicated immediate form.
+define <vscale x 8 x i16> @asr_i16_all_active(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: asr_i16_all_active:
+; CHECK: // %bb.0:
+; CHECK-NEXT: asr z0.h, z0.h, #16
+; CHECK-NEXT: ret
%pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
%elt = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
%splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
ret <vscale x 8 x i16> %out
}
-define <vscale x 4 x i32> @asr_i32(<vscale x 4 x i32> %a) {
+; Ensure we don't match a right shift by zero to the immediate form.
+define <vscale x 8 x i16> @asr_i16_too_small(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: asr_i16_too_small:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z1.h, #0 // =0x0
+; CHECK-NEXT: asr z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+ %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %pg,
+ <vscale x 8 x i16> %a,
+ <vscale x 8 x i16> zeroinitializer)
+ ret <vscale x 8 x i16> %out
+}
+
+; A right-shift amount of 33 exceeds the .s element width and saturates to the
+; maximum immediate (#32).
+define <vscale x 4 x i32> @asr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: asr_i32:
-; CHECK: asr z0.s, z0.s, #32
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: asr z0.s, p0/m, z0.s, #32
+; CHECK-NEXT: ret
+ %elt = insertelement <vscale x 4 x i32> undef, i32 33, i32 0
+ %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %pg,
+ <vscale x 4 x i32> %a,
+ <vscale x 4 x i32> %splat)
+ ret <vscale x 4 x i32> %out
+}
+
+; An all-active predicate (ptrue 31) allows the unpredicated immediate form.
+define <vscale x 4 x i32> @asr_i32_all_active(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: asr_i32_all_active:
+; CHECK: // %bb.0:
+; CHECK-NEXT: asr z0.s, z0.s, #32
+; CHECK-NEXT: ret
%pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
%elt = insertelement <vscale x 4 x i32> undef, i32 32, i32 0
%splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
ret <vscale x 4 x i32> %out
}
-define <vscale x 2 x i64> @asr_i64(<vscale x 2 x i64> %a) {
+; Ensure we don't match a right shift by zero to the immediate form.
+define <vscale x 4 x i32> @asr_i32_too_small(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: asr_i32_too_small:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z1.s, #0 // =0x0
+; CHECK-NEXT: asr z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: ret
+ %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %pg,
+ <vscale x 4 x i32> %a,
+ <vscale x 4 x i32> zeroinitializer)
+ ret <vscale x 4 x i32> %out
+}
+
+; A right-shift amount of 65 exceeds the .d element width and saturates to the
+; maximum immediate (#64).
+define <vscale x 2 x i64> @asr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: asr_i64:
-; CHECK: asr z0.d, z0.d, #64
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: asr z0.d, p0/m, z0.d, #64
+; CHECK-NEXT: ret
+ %elt = insertelement <vscale x 2 x i64> undef, i64 65, i64 0
+ %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %pg,
+ <vscale x 2 x i64> %a,
+ <vscale x 2 x i64> %splat)
+ ret <vscale x 2 x i64> %out
+}
+
+; An all-active predicate (ptrue 31) allows the unpredicated immediate form.
+define <vscale x 2 x i64> @asr_i64_all_active(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: asr_i64_all_active:
+; CHECK: // %bb.0:
+; CHECK-NEXT: asr z0.d, z0.d, #64
+; CHECK-NEXT: ret
%pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
%elt = insertelement <vscale x 2 x i64> undef, i64 64, i64 0
%splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
ret <vscale x 2 x i64> %out
}
+; Ensure we don't match a right shift by zero to the immediate form.
+define <vscale x 2 x i64> @asr_i64_too_small(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
+; CHECK-LABEL: asr_i64_too_small:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z1.d, #0 // =0x0
+; CHECK-NEXT: asr z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: ret
+ %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %pg,
+ <vscale x 2 x i64> %a,
+ <vscale x 2 x i64> zeroinitializer)
+ ret <vscale x 2 x i64> %out
+}
+
; LSL
-define <vscale x 16 x i8> @lsl_i8(<vscale x 16 x i8> %a) {
+; #7 is the maximum left-shift immediate for .b elements.
+define <vscale x 16 x i8> @lsl_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: lsl_i8:
-; CHECK: lsl z0.b, z0.b, #7
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl z0.b, p0/m, z0.b, #7
+; CHECK-NEXT: ret
+ %elt = insertelement <vscale x 16 x i8> undef, i8 7, i32 0
+ %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg,
+ <vscale x 16 x i8> %a,
+ <vscale x 16 x i8> %splat)
+ ret <vscale x 16 x i8> %out
+}
+
+; An all-active predicate (ptrue 31) allows the unpredicated immediate form.
+define <vscale x 16 x i8> @lsl_i8_all_active(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: lsl_i8_all_active:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl z0.b, z0.b, #7
+; CHECK-NEXT: ret
%pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
%elt = insertelement <vscale x 16 x i8> undef, i8 7, i32 0
%splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
ret <vscale x 16 x i8> %out
}
-define <vscale x 8 x i16> @lsl_i16(<vscale x 8 x i16> %a) {
+; Ensure we don't match a left shift bigger than its bitwidth to the immediate form.
+define <vscale x 16 x i8> @lsl_i8_too_big(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: lsl_i8_too_big:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z1.b, #8 // =0x8
+; CHECK-NEXT: lsl z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: ret
+ %elt = insertelement <vscale x 16 x i8> undef, i8 8, i32 0
+ %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg,
+ <vscale x 16 x i8> %a,
+ <vscale x 16 x i8> %splat)
+ ret <vscale x 16 x i8> %out
+}
+
+; Unlike right shifts, a left shift by zero is a valid immediate.
+define <vscale x 16 x i8> @lsl_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: lsl_i8_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl z0.b, p0/m, z0.b, #0
+; CHECK-NEXT: ret
+ %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg,
+ <vscale x 16 x i8> %a,
+ <vscale x 16 x i8> zeroinitializer)
+ ret <vscale x 16 x i8> %out
+}
+
+; #15 is the maximum left-shift immediate for .h elements.
+define <vscale x 8 x i16> @lsl_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: lsl_i16:
-; CHECK: lsl z0.h, z0.h, #15
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl z0.h, p0/m, z0.h, #15
+; CHECK-NEXT: ret
+ %elt = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %pg,
+ <vscale x 8 x i16> %a,
+ <vscale x 8 x i16> %splat)
+ ret <vscale x 8 x i16> %out
+}
+
+; An all-active predicate (ptrue 31) allows the unpredicated immediate form.
+define <vscale x 8 x i16> @lsl_i16_all_active(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: lsl_i16_all_active:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl z0.h, z0.h, #15
+; CHECK-NEXT: ret
%pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
%elt = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
%splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
ret <vscale x 8 x i16> %out
}
-define <vscale x 4 x i32> @lsl_i32(<vscale x 4 x i32> %a) {
+; Ensure we don't match a left shift bigger than its bitwidth to the immediate form.
+define <vscale x 8 x i16> @lsl_i16_too_big(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: lsl_i16_too_big:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z1.h, #16 // =0x10
+; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+ %elt = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %pg,
+ <vscale x 8 x i16> %a,
+ <vscale x 8 x i16> %splat)
+ ret <vscale x 8 x i16> %out
+}
+
+; Unlike right shifts, a left shift by zero is a valid immediate.
+define <vscale x 8 x i16> @lsl_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: lsl_i16_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl z0.h, p0/m, z0.h, #0
+; CHECK-NEXT: ret
+ %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %pg,
+ <vscale x 8 x i16> %a,
+ <vscale x 8 x i16> zeroinitializer)
+ ret <vscale x 8 x i16> %out
+}
+
+; #31 is the maximum left-shift immediate for .s elements.
+define <vscale x 4 x i32> @lsl_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: lsl_i32:
-; CHECK: lsl z0.s, z0.s, #31
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl z0.s, p0/m, z0.s, #31
+; CHECK-NEXT: ret
+ %elt = insertelement <vscale x 4 x i32> undef, i32 31, i32 0
+ %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %pg,
+ <vscale x 4 x i32> %a,
+ <vscale x 4 x i32> %splat)
+ ret <vscale x 4 x i32> %out
+}
+
+; An all-active predicate (ptrue 31) allows the unpredicated immediate form.
+define <vscale x 4 x i32> @lsl_i32_all_active(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: lsl_i32_all_active:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl z0.s, z0.s, #31
+; CHECK-NEXT: ret
%pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
%elt = insertelement <vscale x 4 x i32> undef, i32 31, i32 0
%splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
ret <vscale x 4 x i32> %out
}
-define <vscale x 2 x i64> @lsl_i64(<vscale x 2 x i64> %a) {
+; Ensure we don't match a left shift bigger than its bitwidth to the immediate form.
+define <vscale x 4 x i32> @lsl_i32_too_big(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: lsl_i32_too_big:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z1.s, #32 // =0x20
+; CHECK-NEXT: lsl z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: ret
+ %elt = insertelement <vscale x 4 x i32> undef, i32 32, i32 0
+ %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %pg,
+ <vscale x 4 x i32> %a,
+ <vscale x 4 x i32> %splat)
+ ret <vscale x 4 x i32> %out
+}
+
+; Unlike right shifts, a left shift by zero is a valid immediate.
+define <vscale x 4 x i32> @lsl_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: lsl_i32_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl z0.s, p0/m, z0.s, #0
+; CHECK-NEXT: ret
+ %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %pg,
+ <vscale x 4 x i32> %a,
+ <vscale x 4 x i32> zeroinitializer)
+ ret <vscale x 4 x i32> %out
+}
+
+; #63 is the maximum left-shift immediate for .d elements.
+define <vscale x 2 x i64> @lsl_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: lsl_i64:
-; CHECK: lsl z0.d, z0.d, #63
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl z0.d, p0/m, z0.d, #63
+; CHECK-NEXT: ret
+ %elt = insertelement <vscale x 2 x i64> undef, i64 63, i64 0
+ %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %pg,
+ <vscale x 2 x i64> %a,
+ <vscale x 2 x i64> %splat)
+ ret <vscale x 2 x i64> %out
+}
+
+; An all-active predicate (ptrue 31) allows the unpredicated immediate form.
+define <vscale x 2 x i64> @lsl_i64_all_active(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: lsl_i64_all_active:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl z0.d, z0.d, #63
+; CHECK-NEXT: ret
%pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
%elt = insertelement <vscale x 2 x i64> undef, i64 63, i64 0
%splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
ret <vscale x 2 x i64> %out
}
+; Ensure we don't match a left shift bigger than its bitwidth to the immediate form.
+define <vscale x 2 x i64> @lsl_i64_too_big(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
+; CHECK-LABEL: lsl_i64_too_big:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z1.d, #64 // =0x40
+; CHECK-NEXT: lsl z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: ret
+ %elt = insertelement <vscale x 2 x i64> undef, i64 64, i64 0
+ %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %pg,
+ <vscale x 2 x i64> %a,
+ <vscale x 2 x i64> %splat)
+ ret <vscale x 2 x i64> %out
+}
+
+; Unlike right shifts, a left shift by zero is a valid immediate.
+define <vscale x 2 x i64> @lsl_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
+; CHECK-LABEL: lsl_i64_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl z0.d, p0/m, z0.d, #0
+; CHECK-NEXT: ret
+ %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %pg,
+ <vscale x 2 x i64> %a,
+ <vscale x 2 x i64> zeroinitializer)
+ ret <vscale x 2 x i64> %out
+}
+
; LSR
-define <vscale x 16 x i8> @lsr_i8(<vscale x 16 x i8> %a) {
+; A right-shift amount of 9 exceeds the .b element width and saturates to the
+; maximum immediate (#8).
+define <vscale x 16 x i8> @lsr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: lsr_i8:
-; CHECK: lsr z0.b, z0.b, #8
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsr z0.b, p0/m, z0.b, #8
+; CHECK-NEXT: ret
+ %elt = insertelement <vscale x 16 x i8> undef, i8 9, i32 0
+ %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1> %pg,
+ <vscale x 16 x i8> %a,
+ <vscale x 16 x i8> %splat)
+ ret <vscale x 16 x i8> %out
+}
+
+; An all-active predicate (ptrue 31) allows the unpredicated immediate form.
+define <vscale x 16 x i8> @lsr_i8_all_active(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: lsr_i8_all_active:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsr z0.b, z0.b, #8
+; CHECK-NEXT: ret
%pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
%elt = insertelement <vscale x 16 x i8> undef, i8 8, i32 0
%splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
ret <vscale x 16 x i8> %out
}
-define <vscale x 8 x i16> @lsr_i16(<vscale x 8 x i16> %a) {
+; Ensure we don't match a right shift by zero to the immediate form.
+define <vscale x 16 x i8> @lsr_i8_too_small(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: lsr_i8_too_small:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z1.b, #0 // =0x0
+; CHECK-NEXT: lsr z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: ret
+ %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1> %pg,
+ <vscale x 16 x i8> %a,
+ <vscale x 16 x i8> zeroinitializer)
+ ret <vscale x 16 x i8> %out
+}
+
+; A right-shift amount of 17 exceeds the .h element width and saturates to the
+; maximum immediate (#16).
+define <vscale x 8 x i16> @lsr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: lsr_i16:
-; CHECK: lsr z0.h, z0.h, #16
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsr z0.h, p0/m, z0.h, #16
+; CHECK-NEXT: ret
+ %elt = insertelement <vscale x 8 x i16> undef, i16 17, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1> %pg,
+ <vscale x 8 x i16> %a,
+ <vscale x 8 x i16> %splat)
+ ret <vscale x 8 x i16> %out
+}
+
+; An all-active predicate (ptrue 31) allows the unpredicated immediate form.
+define <vscale x 8 x i16> @lsr_i16_all_active(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: lsr_i16_all_active:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsr z0.h, z0.h, #16
+; CHECK-NEXT: ret
%pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
%elt = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
%splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
ret <vscale x 8 x i16> %out
}
-define <vscale x 4 x i32> @lsr_i32(<vscale x 4 x i32> %a) {
+; Ensure we don't match a right shift by zero to the immediate form.
+define <vscale x 8 x i16> @lsr_i16_too_small(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: lsr_i16_too_small:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z1.h, #0 // =0x0
+; CHECK-NEXT: lsr z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+ %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1> %pg,
+ <vscale x 8 x i16> %a,
+ <vscale x 8 x i16> zeroinitializer)
+ ret <vscale x 8 x i16> %out
+}
+
+; A right-shift amount of 33 exceeds the .s element width and saturates to the
+; maximum immediate (#32).
+define <vscale x 4 x i32> @lsr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: lsr_i32:
-; CHECK: lsr z0.s, z0.s, #32
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsr z0.s, p0/m, z0.s, #32
+; CHECK-NEXT: ret
+ %elt = insertelement <vscale x 4 x i32> undef, i32 33, i32 0
+ %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1> %pg,
+ <vscale x 4 x i32> %a,
+ <vscale x 4 x i32> %splat)
+ ret <vscale x 4 x i32> %out
+}
+
+; An all-active predicate (ptrue 31) allows the unpredicated immediate form.
+define <vscale x 4 x i32> @lsr_i32_all_active(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: lsr_i32_all_active:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsr z0.s, z0.s, #32
+; CHECK-NEXT: ret
%pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
%elt = insertelement <vscale x 4 x i32> undef, i32 32, i32 0
%splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
ret <vscale x 4 x i32> %out
}
-define <vscale x 2 x i64> @lsr_i64(<vscale x 2 x i64> %a) {
+; Ensure we don't match a right shift by zero to the immediate form.
+define <vscale x 4 x i32> @lsr_i32_too_small(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: lsr_i32_too_small:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z1.s, #0 // =0x0
+; CHECK-NEXT: lsr z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: ret
+ %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1> %pg,
+ <vscale x 4 x i32> %a,
+ <vscale x 4 x i32> zeroinitializer)
+ ret <vscale x 4 x i32> %out
+}
+
+; A right-shift amount of 65 exceeds the .d element width and saturates to the
+; maximum immediate (#64).
+define <vscale x 2 x i64> @lsr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: lsr_i64:
-; CHECK: lsr z0.d, z0.d, #64
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsr z0.d, p0/m, z0.d, #64
+; CHECK-NEXT: ret
+ %elt = insertelement <vscale x 2 x i64> undef, i64 65, i64 0
+ %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1> %pg,
+ <vscale x 2 x i64> %a,
+ <vscale x 2 x i64> %splat)
+ ret <vscale x 2 x i64> %out
+}
+
+; An all-active predicate (ptrue 31) allows the unpredicated immediate form.
+define <vscale x 2 x i64> @lsr_i64_all_active(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: lsr_i64_all_active:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsr z0.d, z0.d, #64
+; CHECK-NEXT: ret
%pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
%elt = insertelement <vscale x 2 x i64> undef, i64 64, i64 0
%splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
ret <vscale x 2 x i64> %out
}
+; Ensure we don't match a right shift by zero to the immediate form.
+define <vscale x 2 x i64> @lsr_i64_too_small(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
+; CHECK-LABEL: lsr_i64_too_small:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z1.d, #0 // =0x0
+; CHECK-NEXT: lsr z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: ret
+ %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1> %pg,
+ <vscale x 2 x i64> %a,
+ <vscale x 2 x i64> zeroinitializer)
+ ret <vscale x 2 x i64> %out
+}
+
declare <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)