defm LD1RO_H_IMM : sve_mem_ldor_si<0b01, "ld1roh", Z_h, ZPR16, nxv8i16, nxv8i1, AArch64ld1ro>;
defm LD1RO_W_IMM : sve_mem_ldor_si<0b10, "ld1row", Z_s, ZPR32, nxv4i32, nxv4i1, AArch64ld1ro>;
defm LD1RO_D_IMM : sve_mem_ldor_si<0b11, "ld1rod", Z_d, ZPR64, nxv2i64, nxv2i1, AArch64ld1ro>;
- defm LD1RO_B : sve_mem_ldor_ss<0b00, "ld1rob", Z_b, ZPR8, GPR64NoXZRshifted8>;
- defm LD1RO_H : sve_mem_ldor_ss<0b01, "ld1roh", Z_h, ZPR16, GPR64NoXZRshifted16>;
- defm LD1RO_W : sve_mem_ldor_ss<0b10, "ld1row", Z_s, ZPR32, GPR64NoXZRshifted32>;
- defm LD1RO_D : sve_mem_ldor_ss<0b11, "ld1rod", Z_d, ZPR64, GPR64NoXZRshifted64>;
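+ // Thread the element/predicate types, the ld1ro node, and the scaled reg+reg
+ // addressing mode through the multiclass so each defm also instantiates a
+ // selection pattern for its element size.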
+ defm LD1RO_B : sve_mem_ldor_ss<0b00, "ld1rob", Z_b, ZPR8, GPR64NoXZRshifted8, nxv16i8, nxv16i1, AArch64ld1ro, am_sve_regreg_lsl0>;
+ defm LD1RO_H : sve_mem_ldor_ss<0b01, "ld1roh", Z_h, ZPR16, GPR64NoXZRshifted16, nxv8i16, nxv8i1, AArch64ld1ro, am_sve_regreg_lsl1>;
+ defm LD1RO_W : sve_mem_ldor_ss<0b10, "ld1row", Z_s, ZPR32, GPR64NoXZRshifted32, nxv4i32, nxv4i1, AArch64ld1ro, am_sve_regreg_lsl2>;
+ defm LD1RO_D : sve_mem_ldor_ss<0b11, "ld1rod", Z_d, ZPR64, GPR64NoXZRshifted64, nxv2i64, nxv2i1, AArch64ld1ro, am_sve_regreg_lsl3>;
defm ZIP1_ZZZ_Q : sve_int_perm_bin_perm_128_zz<0b00, 0, "zip1", int_aarch64_sve_zip1q>;
defm ZIP2_ZZZ_Q : sve_int_perm_bin_perm_128_zz<0b00, 1, "zip2", int_aarch64_sve_zip2q>;
defm UZP1_ZZZ_Q : sve_int_perm_bin_perm_128_zz<0b01, 0, "uzp1", int_aarch64_sve_uzp1q>;
}
multiclass sve_mem_ldor_ss<bits<2> sz, string asm, RegisterOperand listty,
- ZPRRegOp zprty, RegisterOperand gprty> {
+ ZPRRegOp zprty, RegisterOperand gprty, ValueType Ty,
+ ValueType PredTy, SDNode Ld1ro, ComplexPattern AddrCP> {
def NAME : sve_mem_ldor_ss<sz, asm, listty, gprty>;
def : InstAlias<asm # "\t$Zt, $Pg/z, [$Rn, $Rm]",
(!cast<Instruction>(NAME) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, gprty:$Rm), 0>;
+
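+ // Select a predicated ld1ro whose address matches the scaled reg+reg
+ // ComplexPattern directly into the scalar+scalar form of the instruction.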
+ def : Pat<(Ty (Ld1ro (PredTy PPR3bAny:$gp), (AddrCP GPR64sp:$base, gprty:$offset))),
+ (!cast<Instruction>(NAME) PPR3bAny:$gp, GPR64sp:$base, gprty:$offset)>;
}
//===----------------------------------------------------------------------===//
--- /dev/null
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve,+f64mm,+bf16 -asm-verbose=0 < %s | FileCheck %s
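+; The LD1RO* (contiguous load and replicate 32 bytes) instructions are gated
+; on +f64mm; +bf16 is needed for the bfloat test below.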
+
+;
+; LD1ROB
+;
+
+define <vscale x 16 x i8> @ld1rob_i8(<vscale x 16 x i1> %pg, i8* %a, i64 %index) nounwind {
+; CHECK-LABEL: ld1rob_i8:
+; CHECK-NEXT: ld1rob { z0.b }, p0/z, [x0, x1]
+; CHECK-NEXT: ret
+ %base = getelementptr i8, i8* %a, i64 %index
+ %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(<vscale x 16 x i1> %pg, i8* %base)
+ ret <vscale x 16 x i8> %load
+}
+
+;
+; LD1ROH
+;
+
+define <vscale x 8 x i16> @ld1roh_i16(<vscale x 8 x i1> %pg, i16* %a, i64 %index) nounwind {
+; CHECK-LABEL: ld1roh_i16:
+; CHECK-NEXT: ld1roh { z0.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+ %base = getelementptr i16, i16* %a, i64 %index
+ %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1ro.nxv8i16(<vscale x 8 x i1> %pg, i16* %base)
+ ret <vscale x 8 x i16> %load
+}
+
+define <vscale x 8 x half> @ld1roh_f16(<vscale x 8 x i1> %pg, half* %a, i64 %index) nounwind {
+; CHECK-LABEL: ld1roh_f16:
+; CHECK-NEXT: ld1roh { z0.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+ %base = getelementptr half, half* %a, i64 %index
+ %load = call <vscale x 8 x half> @llvm.aarch64.sve.ld1ro.nxv8f16(<vscale x 8 x i1> %pg, half* %base)
+ ret <vscale x 8 x half> %load
+}
+
+; bfloat - requires -mattr=+bf16
+define <vscale x 8 x bfloat> @ld1roh_bf16(<vscale x 8 x i1> %pg, bfloat* %a, i64 %index) nounwind {
+; CHECK-LABEL: ld1roh_bf16:
+; CHECK-NEXT: ld1roh { z0.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+ %base = getelementptr bfloat, bfloat* %a, i64 %index
+ %load = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1ro.nxv8bf16(<vscale x 8 x i1> %pg, bfloat* %base)
+ ret <vscale x 8 x bfloat> %load
+}
+
+;
+; LD1ROW
+;
+
+define <vscale x 4 x i32> @ld1row_i32(<vscale x 4 x i1> %pg, i32* %a, i64 %index) nounwind {
+; CHECK-LABEL: ld1row_i32:
+; CHECK-NEXT: ld1row { z0.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+ %base = getelementptr i32, i32* %a, i64 %index
+ %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1ro.nxv4i32(<vscale x 4 x i1> %pg, i32* %base)
+ ret <vscale x 4 x i32> %load
+}
+
+define <vscale x 4 x float> @ld1row_f32(<vscale x 4 x i1> %pg, float* %a, i64 %index) nounwind {
+; CHECK-LABEL: ld1row_f32:
+; CHECK-NEXT: ld1row { z0.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+ %base = getelementptr float, float* %a, i64 %index
+ %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1ro.nxv4f32(<vscale x 4 x i1> %pg, float* %base)
+ ret <vscale x 4 x float> %load
+}
+
+;
+; LD1ROD
+;
+
+define <vscale x 2 x i64> @ld1rod_i64(<vscale x 2 x i1> %pg, i64* %a, i64 %index) nounwind {
+; CHECK-LABEL: ld1rod_i64:
+; CHECK-NEXT: ld1rod { z0.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+ %base = getelementptr i64, i64* %a, i64 %index
+ %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1ro.nxv2i64(<vscale x 2 x i1> %pg, i64* %base)
+ ret <vscale x 2 x i64> %load
+}
+
+define <vscale x 2 x double> @ld1rod_f64(<vscale x 2 x i1> %pg, double* %a, i64 %index) nounwind {
+; CHECK-LABEL: ld1rod_f64:
+; CHECK-NEXT: ld1rod { z0.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+ %base = getelementptr double, double* %a, i64 %index
+ %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1ro.nxv2f64(<vscale x 2 x i1> %pg, double* %base)
+ ret <vscale x 2 x double> %load
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(<vscale x 16 x i1>, i8*)
+
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1ro.nxv8i16(<vscale x 8 x i1>, i16*)
+declare <vscale x 8 x half> @llvm.aarch64.sve.ld1ro.nxv8f16(<vscale x 8 x i1>, half*)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1ro.nxv8bf16(<vscale x 8 x i1>, bfloat*)
+
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1ro.nxv4i32(<vscale x 4 x i1>, i32*)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ld1ro.nxv4f32(<vscale x 4 x i1>, float*)
+
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1ro.nxv2i64(<vscale x 2 x i1>, i64*)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ld1ro.nxv2f64(<vscale x 2 x i1>, double*)