Info.size = MemoryLocation::UnknownSize;
Info.flags |= MachineMemOperand::MOLoad;
return true;
+ case Intrinsic::riscv_seg2_store:
+ case Intrinsic::riscv_seg3_store:
+ case Intrinsic::riscv_seg4_store:
+ case Intrinsic::riscv_seg5_store:
+ case Intrinsic::riscv_seg6_store:
+ case Intrinsic::riscv_seg7_store:
+ case Intrinsic::riscv_seg8_store:
+ Info.opc = ISD::INTRINSIC_VOID;
+ // Operands are (vec, ..., vec, ptr, vl, int_id)
+ Info.ptrVal = I.getArgOperand(I.getNumOperands() - 3);
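+ // memVT and align describe a single element of the stored segments; the
+ // total access size depends on VL, so Info.size is left unknown.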
+ Info.memVT =
+ getValueType(DL, I.getArgOperand(0)->getType()->getScalarType());
+ Info.align = Align(
+ DL.getTypeSizeInBits(I.getArgOperand(0)->getType()->getScalarType()) /
+ 8);
+ Info.size = MemoryLocation::UnknownSize;
+ Info.flags |= MachineMemOperand::MOStore;
+ return true;
}
}
Ops, Store->getMemoryVT(),
Store->getMemOperand());
}
+ case Intrinsic::riscv_seg2_store:
+ case Intrinsic::riscv_seg3_store:
+ case Intrinsic::riscv_seg4_store:
+ case Intrinsic::riscv_seg5_store:
+ case Intrinsic::riscv_seg6_store:
+ case Intrinsic::riscv_seg7_store:
+ case Intrinsic::riscv_seg8_store: {
+ SDLoc DL(Op);
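+ // Map the segment count NF (2..8) to the corresponding scalable vsseg
+ // intrinsic; the fixed-length form is lowered onto it below.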
+ static const Intrinsic::ID VssegInts[] = {
+ Intrinsic::riscv_vsseg2, Intrinsic::riscv_vsseg3,
+ Intrinsic::riscv_vsseg4, Intrinsic::riscv_vsseg5,
+ Intrinsic::riscv_vsseg6, Intrinsic::riscv_vsseg7,
+ Intrinsic::riscv_vsseg8};
+ // Operands are (chain, int_id, vec*, ptr, vl)
+ unsigned NF = Op->getNumOperands() - 4;
+ assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
+ MVT XLenVT = Subtarget.getXLenVT();
+ MVT VT = Op->getOperand(2).getSimpleValueType();
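+ // Fixed-length vectors are lowered through their scalable container type;
+ // the fixed vector's element count becomes the AVL.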
+ MVT ContainerVT = getContainerForFixedLengthVector(VT);
+
+ SDValue VL = getVLOp(VT.getVectorNumElements(), DL, DAG, Subtarget);
+ SDValue IntID = DAG.getTargetConstant(VssegInts[NF - 2], DL, XLenVT);
+ SDValue Ptr = Op->getOperand(NF + 2);
+
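+ // Convert each fixed-length source vector to the scalable container type
+ // expected by the vsseg intrinsic before building the memory intrinsic node.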
+ auto *FixedIntrinsic = cast<MemIntrinsicSDNode>(Op);
+ SmallVector<SDValue, 12> Ops = {FixedIntrinsic->getChain(), IntID};
+ for (unsigned i = 0; i < NF; i++)
+ Ops.push_back(convertToScalableVector(
+ ContainerVT, FixedIntrinsic->getOperand(2 + i), DAG, Subtarget));
+ Ops.append({Ptr, VL});
+
+ return DAG.getMemIntrinsicNode(
+ ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Ops,
+ FixedIntrinsic->getMemoryVT(), FixedIntrinsic->getMemOperand());
+ }
}
return SDValue();
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs | FileCheck %s -check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s -check-prefixes=CHECK,RV64
+
+declare void @llvm.riscv.seg2.store.v8i8.p0.iXLen(<8 x i8>, <8 x i8>, ptr, iXLen)
+define void @store_factor2(<8 x i8> %v0, <8 x i8> %v1, ptr %ptr) {
+; CHECK-LABEL: store_factor2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: # kill: def $v9 killed $v9 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
+; CHECK-NEXT: vsseg2e8.v v8, (a0)
+; CHECK-NEXT: ret
+ call void @llvm.riscv.seg2.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, ptr %ptr, iXLen 8)
+ ret void
+}
+
+declare void @llvm.riscv.seg3.store.v8i8.p0.iXLen(<8 x i8>, <8 x i8>, <8 x i8>, ptr, iXLen)
+define void @store_factor3(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, ptr %ptr) {
+; CHECK-LABEL: store_factor3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: # kill: def $v10 killed $v10 killed $v8_v9_v10 def $v8_v9_v10
+; CHECK-NEXT: # kill: def $v9 killed $v9 killed $v8_v9_v10 def $v8_v9_v10
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9_v10 def $v8_v9_v10
+; CHECK-NEXT: vsseg3e8.v v8, (a0)
+; CHECK-NEXT: ret
+ call void @llvm.riscv.seg3.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, ptr %ptr, iXLen 8)
+ ret void
+}
+
+declare void @llvm.riscv.seg4.store.v8i8.p0.iXLen(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, ptr, iXLen)
+define void @store_factor4(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, ptr %ptr) {
+; CHECK-LABEL: store_factor4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: # kill: def $v11 killed $v11 killed $v8_v9_v10_v11 def $v8_v9_v10_v11
+; CHECK-NEXT: # kill: def $v10 killed $v10 killed $v8_v9_v10_v11 def $v8_v9_v10_v11
+; CHECK-NEXT: # kill: def $v9 killed $v9 killed $v8_v9_v10_v11 def $v8_v9_v10_v11
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9_v10_v11 def $v8_v9_v10_v11
+; CHECK-NEXT: vsseg4e8.v v8, (a0)
+; CHECK-NEXT: ret
+ call void @llvm.riscv.seg4.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, ptr %ptr, iXLen 8)
+ ret void
+}
+
+declare void @llvm.riscv.seg5.store.v8i8.p0.iXLen(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, ptr, iXLen)
+define void @store_factor5(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, ptr %ptr) {
+; CHECK-LABEL: store_factor5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: # kill: def $v12 killed $v12 killed $v8_v9_v10_v11_v12 def $v8_v9_v10_v11_v12
+; CHECK-NEXT: # kill: def $v11 killed $v11 killed $v8_v9_v10_v11_v12 def $v8_v9_v10_v11_v12
+; CHECK-NEXT: # kill: def $v10 killed $v10 killed $v8_v9_v10_v11_v12 def $v8_v9_v10_v11_v12
+; CHECK-NEXT: # kill: def $v9 killed $v9 killed $v8_v9_v10_v11_v12 def $v8_v9_v10_v11_v12
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9_v10_v11_v12 def $v8_v9_v10_v11_v12
+; CHECK-NEXT: vsseg5e8.v v8, (a0)
+; CHECK-NEXT: ret
+ call void @llvm.riscv.seg5.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, ptr %ptr, iXLen 8)
+ ret void
+}
+
+declare void @llvm.riscv.seg6.store.v8i8.p0.iXLen(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, ptr, iXLen)
+define void @store_factor6(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, ptr %ptr) {
+; CHECK-LABEL: store_factor6:
+; CHECK: # %bb.0:
+; CHECK-NEXT: # kill: def $v13 killed $v13 killed $v8_v9_v10_v11_v12_v13 def $v8_v9_v10_v11_v12_v13
+; CHECK-NEXT: # kill: def $v12 killed $v12 killed $v8_v9_v10_v11_v12_v13 def $v8_v9_v10_v11_v12_v13
+; CHECK-NEXT: # kill: def $v11 killed $v11 killed $v8_v9_v10_v11_v12_v13 def $v8_v9_v10_v11_v12_v13
+; CHECK-NEXT: # kill: def $v10 killed $v10 killed $v8_v9_v10_v11_v12_v13 def $v8_v9_v10_v11_v12_v13
+; CHECK-NEXT: # kill: def $v9 killed $v9 killed $v8_v9_v10_v11_v12_v13 def $v8_v9_v10_v11_v12_v13
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9_v10_v11_v12_v13 def $v8_v9_v10_v11_v12_v13
+; CHECK-NEXT: vsseg6e8.v v8, (a0)
+; CHECK-NEXT: ret
+ call void @llvm.riscv.seg6.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, ptr %ptr, iXLen 8)
+ ret void
+}
+
+declare void @llvm.riscv.seg7.store.v8i8.p0.iXLen(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, ptr, iXLen)
+define void @store_factor7(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, ptr %ptr) {
+; CHECK-LABEL: store_factor7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: # kill: def $v14 killed $v14 killed $v8_v9_v10_v11_v12_v13_v14 def $v8_v9_v10_v11_v12_v13_v14
+; CHECK-NEXT: # kill: def $v13 killed $v13 killed $v8_v9_v10_v11_v12_v13_v14 def $v8_v9_v10_v11_v12_v13_v14
+; CHECK-NEXT: # kill: def $v12 killed $v12 killed $v8_v9_v10_v11_v12_v13_v14 def $v8_v9_v10_v11_v12_v13_v14
+; CHECK-NEXT: # kill: def $v11 killed $v11 killed $v8_v9_v10_v11_v12_v13_v14 def $v8_v9_v10_v11_v12_v13_v14
+; CHECK-NEXT: # kill: def $v10 killed $v10 killed $v8_v9_v10_v11_v12_v13_v14 def $v8_v9_v10_v11_v12_v13_v14
+; CHECK-NEXT: # kill: def $v9 killed $v9 killed $v8_v9_v10_v11_v12_v13_v14 def $v8_v9_v10_v11_v12_v13_v14
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9_v10_v11_v12_v13_v14 def $v8_v9_v10_v11_v12_v13_v14
+; CHECK-NEXT: vsseg7e8.v v8, (a0)
+; CHECK-NEXT: ret
+ call void @llvm.riscv.seg7.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, ptr %ptr, iXLen 8)
+ ret void
+}
+
+declare void @llvm.riscv.seg8.store.v8i8.p0.iXLen(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, ptr, iXLen)
+define void @store_factor8(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, <8 x i8> %v7, ptr %ptr) {
+; CHECK-LABEL: store_factor8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: # kill: def $v15 killed $v15 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
+; CHECK-NEXT: # kill: def $v14 killed $v14 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
+; CHECK-NEXT: # kill: def $v13 killed $v13 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
+; CHECK-NEXT: # kill: def $v12 killed $v12 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
+; CHECK-NEXT: # kill: def $v11 killed $v11 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
+; CHECK-NEXT: # kill: def $v10 killed $v10 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
+; CHECK-NEXT: # kill: def $v9 killed $v9 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
+; CHECK-NEXT: vsseg8e8.v v8, (a0)
+; CHECK-NEXT: ret
+ call void @llvm.riscv.seg8.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, <8 x i8> %v7, ptr %ptr, iXLen 8)
+ ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}