if (!LoMemVT.isByteSized() || !HiMemVT.isByteSized())
return TLI.scalarizeVectorStore(N, DAG);
- unsigned IncrementSize = LoMemVT.getSizeInBits()/8;
+ unsigned IncrementSize = LoMemVT.getSizeInBits().getKnownMinSize() / 8;
if (isTruncating)
  Lo = DAG.getTruncStore(Ch, DL, Lo, Ptr, N->getPointerInfo(), LoMemVT,
                         Alignment, MMOFlags, AAInfo);
else
  Lo = DAG.getStore(Ch, DL, Lo, Ptr, N->getPointerInfo(), Alignment, MMOFlags,
                    AAInfo);
- // Increment the pointer to the other half.
- Ptr = DAG.getObjectPtrOffset(DL, Ptr, IncrementSize);
+ MachinePointerInfo MPI;
+ if (LoMemVT.isScalableVector()) {
+ SDValue BytesIncrement = DAG.getVScale(
+ DL, Ptr.getValueType(),
+ APInt(Ptr.getValueSizeInBits().getFixedSize(), IncrementSize));
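+ // The high-half offset scales with vscale and is therefore unknown at
+ // compile time, so the MachinePointerInfo can only record the address
+ // space rather than a fixed byte offset from the original pointer.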
+ MPI = MachinePointerInfo(N->getPointerInfo().getAddrSpace());
+ Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr, BytesIncrement);
+ } else {
+ MPI = N->getPointerInfo().getWithOffset(IncrementSize);
+ // Increment the pointer to the other half.
+ Ptr = DAG.getObjectPtrOffset(DL, Ptr, IncrementSize);
+ }
if (isTruncating)
- Hi = DAG.getTruncStore(Ch, DL, Hi, Ptr,
- N->getPointerInfo().getWithOffset(IncrementSize),
+ Hi = DAG.getTruncStore(Ch, DL, Hi, Ptr, MPI,
HiMemVT, Alignment, MMOFlags, AAInfo);
else
- Hi = DAG.getStore(Ch, DL, Hi, Ptr,
- N->getPointerInfo().getWithOffset(IncrementSize),
- Alignment, MMOFlags, AAInfo);
+ Hi = DAG.getStore(Ch, DL, Hi, Ptr, MPI, Alignment, MMOFlags, AAInfo);
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
}
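The heart of the change is the pointer increment for the high half: with a scalable type the low half occupies vscale * IncrementSize bytes, a quantity only known at run time, so the patch materializes it with DAG.getVScale plus a plain ISD::ADD instead of the fixed-offset getObjectPtrOffset. Below is a minimal standalone sketch of that arithmetic, with getVScale() as a hypothetical stand-in for the hardware's run-time multiplier (it is not an LLVM API):

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the run-time vector-length multiplier; a 128-bit
// SVE implementation has vscale == 1, a 256-bit one has vscale == 2, etc.
static uint64_t getVScale() { return 2; }

int main() {
  // Splitting a <vscale x 16 x i16> store: each half is <vscale x 8 x i16>,
  // whose known-minimum size is 8 * 2 = 16 bytes. This mirrors
  // LoMemVT.getSizeInBits().getKnownMinSize() / 8 in the patch.
  const uint64_t IncrementSize = 8 * sizeof(uint16_t);
  const uint64_t Base = 0x1000; // illustrative base address of the store
  // High-half address = Base + vscale * IncrementSize; this is the value the
  // ISD::ADD of the DAG.getVScale node computes at run time.
  uint64_t HiAddr = Base + getVScale() * IncrementSize;
  printf("high half stored at %#llx\n", (unsigned long long)HiAddr);
  return 0;
}

On a 256-bit implementation this puts the high half 32 bytes, i.e. exactly one vector length, past the base, which is why the second store in the tests below uses the [x0, #1, mul vl] addressing form.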
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+define void @store_promote_4i8(<vscale x 4 x i8> %data, <vscale x 4 x i8>* %a) {
+; CHECK-LABEL: store_promote_4i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: st1b { z0.s }, p0, [x0]
+; CHECK-NEXT: ret
+ store <vscale x 4 x i8> %data, <vscale x 4 x i8>* %a
+ ret void
+}
+
+define void @store_split_16i16(<vscale x 16 x i16> %data, <vscale x 16 x i16>* %a) {
+; CHECK-LABEL: store_split_16i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: st1h { z1.h }, p0, [x0, #1, mul vl]
+; CHECK-NEXT: st1h { z0.h }, p0, [x0]
+; CHECK-NEXT: ret
+ store <vscale x 16 x i16> %data, <vscale x 16 x i16>* %a
+ ret void
+}
+
+define void @store_split_16i32(<vscale x 16 x i32> %data, <vscale x 16 x i32>* %a) {
+; CHECK-LABEL: store_split_16i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: st1w { z3.s }, p0, [x0, #3, mul vl]
+; CHECK-NEXT: st1w { z2.s }, p0, [x0, #2, mul vl]
+; CHECK-NEXT: st1w { z1.s }, p0, [x0, #1, mul vl]
+; CHECK-NEXT: st1w { z0.s }, p0, [x0]
+; CHECK-NEXT: ret
+ store <vscale x 16 x i32> %data, <vscale x 16 x i32>* %a
+ ret void
+}
+
+define void @store_split_16i64(<vscale x 16 x i64> %data, <vscale x 16 x i64>* %a) {
+; CHECK-LABEL: store_split_16i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: st1d { z7.d }, p0, [x0, #7, mul vl]
+; CHECK-NEXT: st1d { z6.d }, p0, [x0, #6, mul vl]
+; CHECK-NEXT: st1d { z5.d }, p0, [x0, #5, mul vl]
+; CHECK-NEXT: st1d { z4.d }, p0, [x0, #4, mul vl]
+; CHECK-NEXT: st1d { z3.d }, p0, [x0, #3, mul vl]
+; CHECK-NEXT: st1d { z2.d }, p0, [x0, #2, mul vl]
+; CHECK-NEXT: st1d { z1.d }, p0, [x0, #1, mul vl]
+; CHECK-NEXT: st1d { z0.d }, p0, [x0]
+; CHECK-NEXT: ret
+ store <vscale x 16 x i64> %data, <vscale x 16 x i64>* %a
+ ret void
+}
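
For the widest case the legalizer applies this split recursively: <vscale x 16 x i64> halves into two <vscale x 8 x i64>, then four <vscale x 4 x i64>, and finally eight legal <vscale x 2 x i64> parts, each one vector length apart, which the backend folds into the [x0, #i, mul vl] addressing mode. A sketch of that offset recursion, using illustrative names only (splitStore is not an LLVM function):

#include <cstdio>

// Offsets are tracked in known-minimum bytes; the run-time byte offset of a
// part is vscale * Offset, and [x0, #n, mul vl] addresses base plus n whole
// vector lengths, so n = Offset / 16 for 16-byte-minimum Z registers.
static void splitStore(unsigned Offset, unsigned MinBytes) {
  if (MinBytes <= 16) { // part now fits a single Z register
    printf("part at base + vscale * %u bytes => [x0, #%u, mul vl]\n", Offset,
           Offset / 16);
    return;
  }
  unsigned Half = MinBytes / 2;    // known-minimum size of each half
  splitStore(Offset, Half);        // low half stays at the current offset
  splitStore(Offset + Half, Half); // high half lands vscale * Half bytes later
}

int main() {
  splitStore(0, 128); // <vscale x 16 x i64>: 16 elements * 8 bytes minimum
  return 0;
}

Running it prints eight parts at known-minimum offsets 0 through 112 bytes, i.e. immediates #0 through #7, matching the st1d sequence that store_split_16i64 checks for.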