(ADR_LSL_ZZZ_D_2 $Op1, $Op2)>;
def : Pat<(nxv2i64 (int_aarch64_sve_adrd nxv2i64:$Op1, nxv2i64:$Op2)),
(ADR_LSL_ZZZ_D_3 $Op1, $Op2)>;
+
+ // Patterns to generate adr instruction.
+ // adr z0.d, [z0.d, z0.d, uxtw]
+ def : Pat<(add nxv2i64:$Op1,
+ (nxv2i64 (and nxv2i64:$Op2, (nxv2i64 (AArch64dup (i64 0xFFFFFFFF)))))), // masking with 0xFFFFFFFF == zero-extend of the low 32 bits of each element
+ (ADR_UXTW_ZZZ_D_0 $Op1, $Op2)>;
+ // adr z0.d, [z0.d, z0.d, sxtw]
+ def : Pat<(add nxv2i64:$Op1,
+ (nxv2i64 (sext_inreg nxv2i64:$Op2, nxv2i32))), // sign-extend the low 32 bits of each i64 element
+ (ADR_SXTW_ZZZ_D_0 $Op1, $Op2)>;
+
+ // adr z0.s, [z0.s, z0.s, lsl #<shift>]
+ // adr z0.d, [z0.d, z0.d, lsl #<shift>]
+ multiclass adrShiftPat<ValueType Ty, ValueType PredTy, ValueType ShiftTy, Instruction DestAdrIns, int ShiftAmt> { // ShiftAmt (1-3) must match the lsl encoding of DestAdrIns
+ def : Pat<(add Ty:$Op1, // base + (index << ShiftAmt)
+ (Ty (AArch64lsl_p (PredTy (SVEAllActive)), // fold only when the shift's predicate is all-active
+ Ty:$Op2,
+ (Ty (AArch64dup (ShiftTy ShiftAmt)))))), // shift amount must be a splatted constant
+ (DestAdrIns $Op1, $Op2)>;
+ }
+ defm : adrShiftPat<nxv2i64, nxv2i1, i64, ADR_LSL_ZZZ_D_1, 1>;
+ defm : adrShiftPat<nxv2i64, nxv2i1, i64, ADR_LSL_ZZZ_D_2, 2>;
+ defm : adrShiftPat<nxv2i64, nxv2i1, i64, ADR_LSL_ZZZ_D_3, 3>;
+ defm : adrShiftPat<nxv4i32, nxv4i1, i32, ADR_LSL_ZZZ_S_1, 1>;
+ defm : adrShiftPat<nxv4i32, nxv4i1, i32, ADR_LSL_ZZZ_S_2, 2>;
+ defm : adrShiftPat<nxv4i32, nxv4i1, i32, ADR_LSL_ZZZ_S_3, 3>;
+
+ // adr z0.d, [z0.d, z0.d, uxtw #<shift>]
+ // adr z0.d, [z0.d, z0.d, sxtw #<shift>]
+ multiclass adrXtwShiftPat<ValueType Ty, ValueType PredTy, int ShiftAmt> { // ShiftAmt (1-3) also selects the instruction via the name suffix below
+ def : Pat<(add Ty:$Op1, // base + ((zext i32 index) << ShiftAmt)
+ (Ty (AArch64lsl_p (PredTy (SVEAllActive)), // fold only when the shift's predicate is all-active
+ (Ty (and Ty:$Op2, (Ty (AArch64dup (i64 0xFFFFFFFF))))), // masking with 0xFFFFFFFF == zero-extend of the low 32 bits
+ (Ty (AArch64dup (i64 ShiftAmt)))))), // shift amount must be a splatted constant
+ (!cast<Instruction>("ADR_UXTW_ZZZ_D_"#ShiftAmt) $Op1, $Op2)>;
+
+ def : Pat<(add Ty:$Op1, // base + ((sext i32 index) << ShiftAmt)
+ (Ty (AArch64lsl_p (PredTy (SVEAllActive)),
+ (Ty (sext_inreg Ty:$Op2, nxv2i32)), // sign-extend the low 32 bits of each i64 element
+ (Ty (AArch64dup (i64 ShiftAmt)))))),
+ (!cast<Instruction>("ADR_SXTW_ZZZ_D_"#ShiftAmt) $Op1, $Op2)>;
+ }
+ defm : adrXtwShiftPat<nxv2i64, nxv2i1, 1>;
+ defm : adrXtwShiftPat<nxv2i64, nxv2i1, 2>;
+ defm : adrXtwShiftPat<nxv2i64, nxv2i1, 3>;
} // End HasSVE
let Predicates = [HasSVEorStreamingSVE] in {
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;
+; ADR
+; Tests adr z0.s, [z0.s, z0.s, lsl #<1,2,3>]
+; Other formats are tested in llvm/test/CodeGen/AArch64/sve-gep.ll
+;
+
+define <vscale x 4 x i32> @adr_32bit_lsl1(<vscale x 4 x i32> %base, <vscale x 4 x i32> %idx) #0 {
+; CHECK-LABEL: adr_32bit_lsl1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adr z0.s, [z0.s, z1.s, lsl #1]
+; CHECK-NEXT: ret
+ %splat_insert = insertelement <vscale x 4 x i32> poison, i32 1, i32 0
+ %one = shufflevector <vscale x 4 x i32> %splat_insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer ; splat of constant 1
+ %shiftedOffset = shl <vscale x 4 x i32> %idx, %one ; idx << 1
+ %address = add <vscale x 4 x i32> %base, %shiftedOffset ; base + (idx << 1), expected to fold into a single adr
+ ret <vscale x 4 x i32> %address
+}
+
+define <vscale x 4 x i32> @adr_32bit_lsl2(<vscale x 4 x i32> %base, <vscale x 4 x i32> %idx) #0 {
+; CHECK-LABEL: adr_32bit_lsl2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adr z0.s, [z0.s, z1.s, lsl #2]
+; CHECK-NEXT: ret
+ %splat_insert = insertelement <vscale x 4 x i32> poison, i32 2, i32 0
+ %two = shufflevector <vscale x 4 x i32> %splat_insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer ; splat of constant 2
+ %shiftedOffset = shl <vscale x 4 x i32> %idx, %two ; idx << 2
+ %address = add <vscale x 4 x i32> %base, %shiftedOffset ; base + (idx << 2), expected to fold into a single adr
+ ret <vscale x 4 x i32> %address
+}
+
+define <vscale x 4 x i32> @adr_32bit_lsl3(<vscale x 4 x i32> %base, <vscale x 4 x i32> %idx) #0 {
+; CHECK-LABEL: adr_32bit_lsl3:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adr z0.s, [z0.s, z1.s, lsl #3]
+; CHECK-NEXT: ret
+ %splat_insert = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
+ %three = shufflevector <vscale x 4 x i32> %splat_insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer ; splat of constant 3
+ %shiftedOffset = shl <vscale x 4 x i32> %idx, %three ; idx << 3
+ %address = add <vscale x 4 x i32> %base, %shiftedOffset ; base + (idx << 3), expected to fold into a single adr
+ ret <vscale x 4 x i32> %address
+}
+
+attributes #0 = { "target-features"="+sve" }
define <vscale x 2 x i16*> @scalable_of_fixed_3_i16(i16* %base, <vscale x 2 x i64> %idx) {
; CHECK-LABEL: scalable_of_fixed_3_i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: lsl z0.d, z0.d, #1
; CHECK-NEXT: mov z1.d, x0
-; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: adr z0.d, [z1.d, z0.d, lsl #1]
; CHECK-NEXT: ret
%d = getelementptr i16, i16* %base, <vscale x 2 x i64> %idx
ret <vscale x 2 x i16*> %d
define <vscale x 2 x i32*> @scalable_of_fixed_3_i32(i32* %base, <vscale x 2 x i64> %idx) {
; CHECK-LABEL: scalable_of_fixed_3_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: lsl z0.d, z0.d, #2
; CHECK-NEXT: mov z1.d, x0
-; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: adr z0.d, [z1.d, z0.d, lsl #2]
; CHECK-NEXT: ret
%d = getelementptr i32, i32* %base, <vscale x 2 x i64> %idx
ret <vscale x 2 x i32*> %d
define <vscale x 2 x i64*> @scalable_of_fixed_3_i64(i64* %base, <vscale x 2 x i64> %idx) {
; CHECK-LABEL: scalable_of_fixed_3_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: lsl z0.d, z0.d, #3
; CHECK-NEXT: mov z1.d, x0
-; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: adr z0.d, [z1.d, z0.d, lsl #3]
; CHECK-NEXT: ret
%d = getelementptr i64, i64* %base, <vscale x 2 x i64> %idx
ret <vscale x 2 x i64*> %d
define <vscale x 2 x i8*> @scalable_of_fixed_4_i8(i8* %base, <vscale x 2 x i32> %idx) {
; CHECK-LABEL: scalable_of_fixed_4_i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: sxtw z0.d, p0/m, z0.d
; CHECK-NEXT: mov z1.d, x0
-; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: adr z0.d, [z1.d, z0.d, sxtw]
; CHECK-NEXT: ret
%d = getelementptr i8, i8* %base, <vscale x 2 x i32> %idx
ret <vscale x 2 x i8*> %d
define <vscale x 2 x i16*> @scalable_of_fixed_4_i16(i16* %base, <vscale x 2 x i32> %idx) {
; CHECK-LABEL: scalable_of_fixed_4_i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: sxtw z0.d, p0/m, z0.d
-; CHECK-NEXT: lsl z0.d, z0.d, #1
; CHECK-NEXT: mov z1.d, x0
-; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: adr z0.d, [z1.d, z0.d, sxtw #1]
; CHECK-NEXT: ret
%d = getelementptr i16, i16* %base, <vscale x 2 x i32> %idx
ret <vscale x 2 x i16*> %d
define <vscale x 2 x i32*> @scalable_of_fixed_4_i32(i32* %base, <vscale x 2 x i32> %idx) {
; CHECK-LABEL: scalable_of_fixed_4_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: sxtw z0.d, p0/m, z0.d
-; CHECK-NEXT: lsl z0.d, z0.d, #2
; CHECK-NEXT: mov z1.d, x0
-; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: adr z0.d, [z1.d, z0.d, sxtw #2]
; CHECK-NEXT: ret
%d = getelementptr i32, i32* %base, <vscale x 2 x i32> %idx
ret <vscale x 2 x i32*> %d
define <vscale x 2 x i64*> @scalable_of_fixed_4_i64(i64* %base, <vscale x 2 x i32> %idx) {
; CHECK-LABEL: scalable_of_fixed_4_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: sxtw z0.d, p0/m, z0.d
-; CHECK-NEXT: lsl z0.d, z0.d, #3
; CHECK-NEXT: mov z1.d, x0
-; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: adr z0.d, [z1.d, z0.d, sxtw #3]
; CHECK-NEXT: ret
%d = getelementptr i64, i64* %base, <vscale x 2 x i32> %idx
ret <vscale x 2 x i64*> %d
; CHECK-LABEL: scalable_of_fixed_5:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z1.d, x0
-; CHECK-NEXT: and z0.d, z0.d, #0xffffffff
-; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: adr z0.d, [z1.d, z0.d, uxtw]
; CHECK-NEXT: ret
%idxZext = zext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
%d = getelementptr i8, i8* %base, <vscale x 2 x i64> %idxZext
define <vscale x 2 x i16*> @scalable_of_fixed_5_i16(i16* %base, <vscale x 2 x i32> %idx) {
; CHECK-LABEL: scalable_of_fixed_5_i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: and z0.d, z0.d, #0xffffffff
; CHECK-NEXT: mov z1.d, x0
-; CHECK-NEXT: lsl z0.d, z0.d, #1
-; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: adr z0.d, [z1.d, z0.d, uxtw #1]
; CHECK-NEXT: ret
%idxZext = zext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
%d = getelementptr i16, i16* %base, <vscale x 2 x i64> %idxZext
define <vscale x 2 x i32*> @scalable_of_fixed_5_i32(i32* %base, <vscale x 2 x i32> %idx) {
; CHECK-LABEL: scalable_of_fixed_5_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: and z0.d, z0.d, #0xffffffff
; CHECK-NEXT: mov z1.d, x0
-; CHECK-NEXT: lsl z0.d, z0.d, #2
-; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: adr z0.d, [z1.d, z0.d, uxtw #2]
; CHECK-NEXT: ret
%idxZext = zext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
%d = getelementptr i32, i32* %base, <vscale x 2 x i64> %idxZext
define <vscale x 2 x i64*> @scalable_of_fixed_5_i64(i64* %base, <vscale x 2 x i32> %idx) {
; CHECK-LABEL: scalable_of_fixed_5_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: and z0.d, z0.d, #0xffffffff
; CHECK-NEXT: mov z1.d, x0
-; CHECK-NEXT: lsl z0.d, z0.d, #3
-; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: adr z0.d, [z1.d, z0.d, uxtw #3]
; CHECK-NEXT: ret
%idxZext = zext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
%d = getelementptr i64, i64* %base, <vscale x 2 x i64> %idxZext