(REG_SEQUENCE ZPR4Mul4, vt:$Zn1, zsub0, vt:$Zn2, zsub1, vt:$Zn3, zsub2, vt:$Zn4, zsub3),
zpr_ty:$Zm, imm_ty:$i)>;
+class SME2_Sat_Shift_VG2_Pat<string name, SDPatternOperator intrinsic, ValueType out_vt, ValueType in_vt, Operand imm_ty>
+ : Pat<(out_vt (intrinsic in_vt:$Zn1, in_vt:$Zn2, (i32 imm_ty:$i))),
+ (!cast<Instruction>(name) (REG_SEQUENCE ZPR2Mul2, in_vt:$Zn1, zsub0, in_vt:$Zn2, zsub1), imm_ty:$i)>;
+
+class SME2_Sat_Shift_VG4_Pat<string name, SDPatternOperator intrinsic, ValueType out_vt, ValueType in_vt, Operand imm_ty>
+ : Pat<(out_vt (intrinsic in_vt:$Zn1, in_vt:$Zn2, in_vt:$Zn3, in_vt:$Zn4, (i32 imm_ty:$i))),
+ (!cast<Instruction>(name) (REG_SEQUENCE ZPR4Mul4, in_vt:$Zn1, zsub0, in_vt:$Zn2, zsub1, in_vt:$Zn3, zsub2, in_vt:$Zn4, zsub3),
+ imm_ty:$i)>;
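+
+// A hedged sketch of how these patterns fire: for a call such as
+//   (int_aarch64_sve_sqrshr_x2 nxv4i32:$Zn1, nxv4i32:$Zn2, (i32 16))
+// the VG2 pattern builds a ZPR2Mul2 tuple with REG_SEQUENCE ($Zn1 in zsub0,
+// $Zn2 in zsub1) and passes the tuple plus the unchanged immediate to the
+// instruction that `name` resolves to via !cast<Instruction>.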
+
class SME2_Cvt_VG4_Pat<string name, SDPatternOperator intrinsic, ValueType out_vt, ValueType in_vt>
: Pat<(out_vt (intrinsic in_vt:$Zn1, in_vt:$Zn2, in_vt:$Zn3, in_vt:$Zn4)),
(!cast<Instruction>(name) (REG_SEQUENCE ZPR4Mul4, in_vt:$Zn1, zsub0, in_vt:$Zn2, zsub1, in_vt:$Zn3, zsub2, in_vt:$Zn4, zsub3))>;
//===----------------------------------------------------------------------===//
// SME2 multi-vec saturating shift right narrow
class sme2_sat_shift_vector_vg2<string mnemonic, bit op, bit u>
- : I<(outs ZPR16:$Zd), (ins ZZ_s_mul_r:$Zn, vecshiftR16:$imm4),
+ : I<(outs ZPR16:$Zd), (ins ZZ_s_mul_r:$Zn, tvecshiftR16:$imm4),
mnemonic, "\t$Zd, $Zn, $imm4",
"", []>, Sched<[]> {
bits<4> imm4;
let Inst{4-0} = Zd;
}
-multiclass sme2_sat_shift_vector_vg2<string mnemonic, bit op, bit u> {
+multiclass sme2_sat_shift_vector_vg2<string mnemonic, bit op, bit u, SDPatternOperator intrinsic> {
def _H : sme2_sat_shift_vector_vg2<mnemonic, op, u>;
+
+ def : SME2_Sat_Shift_VG2_Pat<NAME # _H, intrinsic, nxv8i16, nxv4i32, tvecshiftR16>;
}
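+
+// Hypothetical instantiation for illustration (the real defm lives with the
+// SME2 instruction definitions and its name/encoding may differ):
+//   defm SQRSHR_VG2_Z2ZI : sme2_sat_shift_vector_vg2<"sqrshr", 0b0, 0b0,
+//                                                    int_aarch64_sve_sqrshr_x2>;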
class sme2_sat_shift_vector_vg4<bits<2> sz, bits<3> op, ZPRRegOp zpr_ty,
let Inst{4-0} = Zd;
}
-multiclass sme2_sat_shift_vector_vg4<string mnemonic, bits<3> op> {
- def _B : sme2_sat_shift_vector_vg4<{0,1}, op, ZPR8, ZZZZ_s_mul_r, vecshiftR32,
+multiclass sme2_sat_shift_vector_vg4<string mnemonic, bits<3> op, SDPatternOperator intrinsic> {
+ def _B : sme2_sat_shift_vector_vg4<{0,1}, op, ZPR8, ZZZZ_s_mul_r, tvecshiftR32,
mnemonic>{
bits<5> imm;
let Inst{20-16} = imm;
}
- def _H : sme2_sat_shift_vector_vg4<{1,?}, op, ZPR16, ZZZZ_d_mul_r, vecshiftR64,
+ def _H : sme2_sat_shift_vector_vg4<{1,?}, op, ZPR16, ZZZZ_d_mul_r, tvecshiftR64,
mnemonic> {
bits<6> imm;
let Inst{22} = imm{5};
let Inst{20-16} = imm{4-0};
}
+
+ def : SME2_Sat_Shift_VG4_Pat<NAME # _B, intrinsic, nxv16i8, nxv4i32, tvecshiftR32>;
+ def : SME2_Sat_Shift_VG4_Pat<NAME # _H, intrinsic, nxv8i16, nxv2i64, tvecshiftR64>;
}
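+
+// Likewise for the four-vector form (hypothetical; the opcode bits here are
+// placeholders):
+//   defm SQRSHR_VG4_Z4ZI : sme2_sat_shift_vector_vg4<"sqrshr", 0b000,
+//                                                    int_aarch64_sve_sqrshr_x4>;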
//===----------------------------------------------------------------------===//
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-qrshr.ll
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -verify-machineinstrs < %s | FileCheck %s
+
+;
+; S/UQRSHR x2
+;
+
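+; The mov instructions in the checks below are register-allocator copies:
+; the x2 instructions read a ZZ_s_mul_r tuple, i.e. two consecutive Z
+; registers whose first index is a multiple of 2, so %zn1/%zn2 (arriving in
+; z1/z2) are copied into the aligned pair { z2, z3 }.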
+define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_x2_s16(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2) {
+; CHECK-LABEL: multi_vector_sat_shift_narrow_x2_s16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z3.d, z2.d
+; CHECK-NEXT: mov z2.d, z1.d
+; CHECK-NEXT: sqrshr z0.h, { z2.s, z3.s }, #16
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshr.x2.nxv8i16(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, i32 16)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_x2_u16(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2) {
+; CHECK-LABEL: multi_vector_sat_shift_narrow_x2_u16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z3.d, z2.d
+; CHECK-NEXT: mov z2.d, z1.d
+; CHECK-NEXT: uqrshr z0.h, { z2.s, z3.s }, #16
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqrshr.x2.nxv8i16(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, i32 16)
+ ret <vscale x 8 x i16> %res
+}
+
+;
+; S/UQRSHR x4
+;
+
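+; The x4 instructions read a ZZZZ_s/d_mul_r tuple: four consecutive Z
+; registers starting at a multiple of 4, hence the copies into z4-z7.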
+define <vscale x 16 x i8> @multi_vector_sat_shift_narrow_x4_s8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) {
+; CHECK-LABEL: multi_vector_sat_shift_narrow_x4_s8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: sqrshr z0.b, { z4.s - z7.s }, #32
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshr.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32)
+ ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_x4_s16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) {
+; CHECK-LABEL: multi_vector_sat_shift_narrow_x4_s16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: sqrshr z0.h, { z4.d - z7.d }, #64
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshr.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 16 x i8> @multi_vector_sat_shift_narrow_x4_u8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) {
+; CHECK-LABEL: multi_vector_sat_shift_narrow_x4_u8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: uqrshr z0.b, { z4.s - z7.s }, #32
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i8> @llvm.aarch64.sve.uqrshr.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32)
+ ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_x4_u16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) {
+; CHECK-LABEL: multi_vector_sat_shift_narrow_x4_u16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: uqrshr z0.h, { z4.d - z7.d }, #64
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqrshr.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64)
+ ret <vscale x 8 x i16> %res
+}
+
+;
+; S/UQRSHRN x4
+;
+
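+; As the test names indicate, the N variants interleave the narrowed
+; results in the destination rather than concatenating them.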
+define <vscale x 16 x i8> @multi_vector_sat_shift_narrow_interleave_x4_s8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) {
+; CHECK-LABEL: multi_vector_sat_shift_narrow_interleave_x4_s8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: sqrshrn z0.b, { z4.s - z7.s }, #32
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrn.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32)
+ ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_interleave_x4_s16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) {
+; CHECK-LABEL: multi_vector_sat_shift_narrow_interleave_x4_s16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: sqrshrn z0.h, { z4.d - z7.d }, #64
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrn.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 16 x i8> @multi_vector_sat_shift_narrow_interleave_x4_u8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) {
+; CHECK-LABEL: multi_vector_sat_shift_narrow_interleave_x4_u8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: uqrshrn z0.b, { z4.s - z7.s }, #32
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i8> @llvm.aarch64.sve.uqrshrn.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32)
+ ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_interleave_x4_u16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) {
+; CHECK-LABEL: multi_vector_sat_shift_narrow_interleave_x4_u16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: uqrshrn z0.h, { z4.d - z7.d }, #64
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqrshrn.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64)
+ ret <vscale x 8 x i16> %res
+}
+
+;
+; SQRSHRU x2
+;
+
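+; SQRSHRU shifts and rounds signed inputs, then saturates them to an
+; unsigned narrow result.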
+define <vscale x 8 x i16> @multi_vector_sat_shift_unsigned_narrow_x2_u16(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2) {
+; CHECK-LABEL: multi_vector_sat_shift_unsigned_narrow_x2_u16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z3.d, z2.d
+; CHECK-NEXT: mov z2.d, z1.d
+; CHECK-NEXT: sqrshru z0.h, { z2.s, z3.s }, #16
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshru.x2.nxv8i16(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, i32 16)
+ ret <vscale x 8 x i16> %res
+}
+
+;
+; SQRSHRU x4
+;
+
+define <vscale x 16 x i8> @multi_vector_sat_shift_unsigned_narrow_x4_u8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) {
+; CHECK-LABEL: multi_vector_sat_shift_unsigned_narrow_x4_u8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: sqrshru z0.b, { z4.s - z7.s }, #32
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshru.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32)
+ ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @multi_vector_sat_shift_unsigned_narrow_x4_u16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) {
+; CHECK-LABEL: multi_vector_sat_shift_unsigned_narrow_x4_u16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: sqrshru z0.h, { z4.d - z7.d }, #64
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshru.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64)
+ ret <vscale x 8 x i16> %res
+}
+
+;
+; SQRSHRUN x4
+;
+
+define <vscale x 16 x i8> @multi_vector_sat_shift_unsigned_narrow_interleave_x4_u8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) {
+; CHECK-LABEL: multi_vector_sat_shift_unsigned_narrow_interleave_x4_u8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: sqrshrun z0.b, { z4.s - z7.s }, #32
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrun.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32)
+ ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @multi_vector_sat_shift_unsigned_narrow_interleave_x4_u16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) {
+; CHECK-LABEL: multi_vector_sat_shift_unsigned_narrow_interleave_x4_u16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: sqrshrun z0.h, { z4.d - z7.d }, #64
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrun.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64)
+ ret <vscale x 8 x i16> %res
+}
+
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshr.x2.nxv8i16(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshr.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshr.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)
+
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqrshr.x2.nxv8i16(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uqrshr.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqrshr.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrn.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrn.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uqrshrn.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqrshrn.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)
+
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshru.x2.nxv8i16(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshru.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshru.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrun.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrun.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)