(!cast<Instruction>(NAME # _D) ZPR64:$Zd, PPRAny:$Pg, 0, 0), 0>;
}
-multiclass sve_int_dup_imm_pred_zero<string asm> {
- def _B : sve_int_dup_imm_pred<0b00, 0, asm, ZPR8, "/z", (ins PPRAny:$Pg, cpy_imm8_opt_lsl_i8:$imm)>;
- def _H : sve_int_dup_imm_pred<0b01, 0, asm, ZPR16, "/z", (ins PPRAny:$Pg, cpy_imm8_opt_lsl_i16:$imm)>;
- def _S : sve_int_dup_imm_pred<0b10, 0, asm, ZPR32, "/z", (ins PPRAny:$Pg, cpy_imm8_opt_lsl_i32:$imm)>;
- def _D : sve_int_dup_imm_pred<0b11, 0, asm, ZPR64, "/z", (ins PPRAny:$Pg, cpy_imm8_opt_lsl_i64:$imm)>;
-
- def : InstAlias<"mov $Zd, $Pg/z, $imm",
- (!cast<Instruction>(NAME # _B) ZPR8:$Zd, PPRAny:$Pg, cpy_imm8_opt_lsl_i8:$imm), 1>;
- def : InstAlias<"mov $Zd, $Pg/z, $imm",
- (!cast<Instruction>(NAME # _H) ZPR16:$Zd, PPRAny:$Pg, cpy_imm8_opt_lsl_i16:$imm), 1>;
+// Zeroing predicated move of an immediate for a single element size, plus
+// ISel patterns that reuse it to extend a predicate vector: active lanes
+// take the immediate and inactive lanes are zeroed.
+multiclass sve_int_dup_imm_pred_zero_inst<
+ bits<2> sz8_64, string asm, ZPRRegOp zprty, ValueType intty,
+ ValueType predty, imm8_opt_lsl cpyimm> {
+ def NAME : sve_int_dup_imm_pred<sz8_64, 0, asm, zprty, "/z", (ins PPRAny:$Pg, cpyimm:$imm)>;
 def : InstAlias<"mov $Zd, $Pg/z, $imm",
-(!cast<Instruction>(NAME # _S) ZPR32:$Zd, PPRAny:$Pg, cpy_imm8_opt_lsl_i32:$imm), 1>;
-def : InstAlias<"mov $Zd, $Pg/z, $imm",
-(!cast<Instruction>(NAME # _D) ZPR64:$Zd, PPRAny:$Pg, cpy_imm8_opt_lsl_i64:$imm), 1>;
+ (!cast<Instruction>(NAME) zprty:$Zd, PPRAny:$Pg, cpyimm:$imm), 1>;
+ // zext/anyext of a predicate -> mov #1 under the predicate; sext -> mov #-1.
+ // The first immediate operand is the value, the trailing 0 is the optional
+ // left-shift amount of the cpy_imm8_opt_lsl operand pair.
+ def : Pat<(intty (zext (predty PPRAny:$Ps1))),
+ (!cast<Instruction>(NAME) PPRAny:$Ps1, 1, 0)>;
+ def : Pat<(intty (sext (predty PPRAny:$Ps1))),
+ (!cast<Instruction>(NAME) PPRAny:$Ps1, -1, 0)>;
+ def : Pat<(intty (anyext (predty PPRAny:$Ps1))),
+ (!cast<Instruction>(NAME) PPRAny:$Ps1, 1, 0)>;
+}
+
+// Instantiate the instruction, assembly alias and extend patterns for each
+// element size (B/H/S/D) with the matching vector/predicate types.
+multiclass sve_int_dup_imm_pred_zero<string asm> {
+ defm _B : sve_int_dup_imm_pred_zero_inst<0b00, asm, ZPR8, nxv16i8, nxv16i1, cpy_imm8_opt_lsl_i8>;
+ defm _H : sve_int_dup_imm_pred_zero_inst<0b01, asm, ZPR16, nxv8i16, nxv8i1, cpy_imm8_opt_lsl_i16>;
+ defm _S : sve_int_dup_imm_pred_zero_inst<0b10, asm, ZPR32, nxv4i32, nxv4i1, cpy_imm8_opt_lsl_i32>;
+ defm _D : sve_int_dup_imm_pred_zero_inst<0b11, asm, ZPR64, nxv2i64, nxv2i1, cpy_imm8_opt_lsl_i64>;
}
//===----------------------------------------------------------------------===//
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
+
+; Extends from i1 lower to a zeroing predicated mov of an immediate:
+; #-1 for sext, #1 for zext (active lanes take the immediate, inactive
+; lanes are zeroed).
+define <vscale x 16 x i8> @sext_i1_i8(<vscale x 16 x i1> %a) {
+; CHECK-LABEL: sext_i1_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+ %r = sext <vscale x 16 x i1> %a to <vscale x 16 x i8>
+ ret <vscale x 16 x i8> %r
+}
+
+define <vscale x 8 x i16> @sext_i1_i16(<vscale x 8 x i1> %a) {
+; CHECK-LABEL: sext_i1_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+ %r = sext <vscale x 8 x i1> %a to <vscale x 8 x i16>
+ ret <vscale x 8 x i16> %r
+}
+
+define <vscale x 4 x i32> @sext_i1_i32(<vscale x 4 x i1> %a) {
+; CHECK-LABEL: sext_i1_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+ %r = sext <vscale x 4 x i1> %a to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 2 x i64> @sext_i1_i64(<vscale x 2 x i1> %a) {
+; CHECK-LABEL: sext_i1_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z0.d, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+ %r = sext <vscale x 2 x i1> %a to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %r
+}
+
+define <vscale x 16 x i8> @zext_i1_i8(<vscale x 16 x i1> %a) {
+; CHECK-LABEL: zext_i1_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z0.b, p0/z, #1 // =0x1
+; CHECK-NEXT: ret
+ %r = zext <vscale x 16 x i1> %a to <vscale x 16 x i8>
+ ret <vscale x 16 x i8> %r
+}
+
+define <vscale x 8 x i16> @zext_i1_i16(<vscale x 8 x i1> %a) {
+; CHECK-LABEL: zext_i1_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z0.h, p0/z, #1 // =0x1
+; CHECK-NEXT: ret
+ %r = zext <vscale x 8 x i1> %a to <vscale x 8 x i16>
+ ret <vscale x 8 x i16> %r
+}
+
+define <vscale x 4 x i32> @zext_i1_i32(<vscale x 4 x i1> %a) {
+; CHECK-LABEL: zext_i1_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z0.s, p0/z, #1 // =0x1
+; CHECK-NEXT: ret
+ %r = zext <vscale x 4 x i1> %a to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 2 x i64> @zext_i1_i64(<vscale x 2 x i1> %a) {
+; CHECK-LABEL: zext_i1_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z0.d, p0/z, #1 // =0x1
+; CHECK-NEXT: ret
+ %r = zext <vscale x 2 x i1> %a to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %r
+}
+
+; Extends from sub-element-width integers held in wider lanes: sext lowers
+; to a merging sxtb/sxth/sxtw under an all-true predicate; zext lowers to
+; an AND with the corresponding source-width mask.
+define <vscale x 8 x i16> @sext_i8_i16(<vscale x 8 x i8> %a) {
+; CHECK-LABEL: sext_i8_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: sxtb z0.h, p0/m, z0.h
+; CHECK-NEXT: ret
+ %r = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+ ret <vscale x 8 x i16> %r
+}
+
+define <vscale x 4 x i32> @sext_i8_i32(<vscale x 4 x i8> %a) {
+; CHECK-LABEL: sext_i8_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: sxtb z0.s, p0/m, z0.s
+; CHECK-NEXT: ret
+ %r = sext <vscale x 4 x i8> %a to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 2 x i64> @sext_i8_i64(<vscale x 2 x i8> %a) {
+; CHECK-LABEL: sext_i8_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: sxtb z0.d, p0/m, z0.d
+; CHECK-NEXT: ret
+ %r = sext <vscale x 2 x i8> %a to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %r
+}
+
+define <vscale x 8 x i16> @zext_i8_i16(<vscale x 8 x i8> %a) {
+; CHECK-LABEL: zext_i8_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and z0.h, z0.h, #0xff
+; CHECK-NEXT: ret
+ %r = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+ ret <vscale x 8 x i16> %r
+}
+
+define <vscale x 4 x i32> @zext_i8_i32(<vscale x 4 x i8> %a) {
+; CHECK-LABEL: zext_i8_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and z0.s, z0.s, #0xff
+; CHECK-NEXT: ret
+ %r = zext <vscale x 4 x i8> %a to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 2 x i64> @zext_i8_i64(<vscale x 2 x i8> %a) {
+; CHECK-LABEL: zext_i8_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and z0.d, z0.d, #0xff
+; CHECK-NEXT: ret
+ %r = zext <vscale x 2 x i8> %a to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %r
+}
+
+define <vscale x 4 x i32> @sext_i16_i32(<vscale x 4 x i16> %a) {
+; CHECK-LABEL: sext_i16_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: sxth z0.s, p0/m, z0.s
+; CHECK-NEXT: ret
+ %r = sext <vscale x 4 x i16> %a to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 2 x i64> @sext_i16_i64(<vscale x 2 x i16> %a) {
+; CHECK-LABEL: sext_i16_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: sxth z0.d, p0/m, z0.d
+; CHECK-NEXT: ret
+ %r = sext <vscale x 2 x i16> %a to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %r
+}
+
+define <vscale x 4 x i32> @zext_i16_i32(<vscale x 4 x i16> %a) {
+; CHECK-LABEL: zext_i16_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and z0.s, z0.s, #0xffff
+; CHECK-NEXT: ret
+ %r = zext <vscale x 4 x i16> %a to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 2 x i64> @zext_i16_i64(<vscale x 2 x i16> %a) {
+; CHECK-LABEL: zext_i16_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and z0.d, z0.d, #0xffff
+; CHECK-NEXT: ret
+ %r = zext <vscale x 2 x i16> %a to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %r
+}
+
+define <vscale x 2 x i64> @sext_i32_i64(<vscale x 2 x i32> %a) {
+; CHECK-LABEL: sext_i32_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: sxtw z0.d, p0/m, z0.d
+; CHECK-NEXT: ret
+ %r = sext <vscale x 2 x i32> %a to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %r
+}
+
+define <vscale x 2 x i64> @zext_i32_i64(<vscale x 2 x i32> %a) {
+; CHECK-LABEL: zext_i32_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and z0.d, z0.d, #0xffffffff
+; CHECK-NEXT: ret
+ %r = zext <vscale x 2 x i32> %a to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %r
+}