MVT XLenVT = Subtarget.getXLenVT();
SDValue VL = VecVT.isFixedLengthVector()
? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
- : DAG.getRegister(RISCV::X0, XLenVT);
+ : DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
return {Mask, VL};
// Fall back to using a stack store and a stride-x0 vector load. Use VLMAX as VL.
return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi,
- DAG.getRegister(RISCV::X0, MVT::i64));
+ DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, MVT::i64));
}
// Custom-lower extensions from mask vectors by using a vselect either with 1
PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
} else
- VL = DAG.getRegister(RISCV::X0, XLenVT);
+ VL = DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vle_mask, DL, XLenVT);
Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
} else
- VL = DAG.getRegister(RISCV::X0, XLenVT);
+ VL = DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vse_mask, DL, XLenVT);
return DAG.getMemIntrinsicNode(
VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
} else
- VL = DAG.getRegister(RISCV::X0, XLenVT);
+ VL = DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
unsigned IntID =
IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
} else
- VL = DAG.getRegister(RISCV::X0, XLenVT);
+ VL = DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
unsigned IntID =
IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
if (RISCVII::hasVLOp(TSFlags)) {
const MachineOperand &VLOp = MI.getOperand(NumOperands - 2);
- if (VLOp.isImm())
- InstrInfo.setAVLImm(VLOp.getImm());
- else
+ if (VLOp.isImm()) {
+ int64_t Imm = VLOp.getImm();
+ // Convert the VLMax sentinel to the X0 register; an AVL of X0 means VLMAX.
+ if (Imm == RISCV::VLMaxSentinel)
+ InstrInfo.setAVLReg(RISCV::X0);
+ else
+ InstrInfo.setAVLImm(Imm);
+ } else {
InstrInfo.setAVLReg(VLOp.getReg());
+ }
} else
InstrInfo.setAVLReg(RISCV::NoRegister);
InstrInfo.setVTYPE(VLMul, SEW, /*TailAgnostic*/ TailAgnostic,
const RISCVSubtarget &STI;
};
+namespace RISCV {
+// Special immediate for the AVL operand of V pseudo instructions, indicating VLMAX.
+static constexpr int64_t VLMaxSentinel = -1LL;
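+// For example (illustrative, mirroring the lowering changes in this patch),
+// SelectionDAG code can request VLMAX for the VL operand with
+//   DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT)
+// and the vsetvli insertion pass converts the sentinel back to an X0 AVL.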
+} // namespace RISCV
+
namespace RISCVVPseudosTable {
struct PseudoInfo {
// Operand that is allowed to be a register or a 5 bit immediate.
// This allows us to pick between VSETIVLI and VSETVLI opcodes using the same
// pseudo instructions.
-def AVL : RegisterOperand<GPR> {
+def AVL : RegisterOperand<GPRNoX0> {
let OperandNamespace = "RISCVOp";
let OperandType = "OPERAND_AVL";
}
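+// For example (illustrative; compare the MIR tests updated in this patch), an
+// AVL of 2 is lowered via PseudoVSETIVLI, a GPRNoX0 virtual register via
+// PseudoVSETVLI, and VLMaxSentinel (-1) via a vsetvli with an X0 AVL.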
def VLOpFrag : PatFrag<(ops), (XLenVT (VLOp (XLenVT AVL:$vl)))>;
// Output pattern used to represent VLMAX in the pseudo instructions.
-def VLMax : OutPatFrag<(ops), (XLenVT X0)>;
+// We can't use the X0 register because the AVL operands use GPRNoX0.
+// This must be kept in sync with RISCV::VLMaxSentinel.
+def VLMax : OutPatFrag<(ops), (XLenVT -1)>;
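+// For example (illustrative; the exact pattern classes may differ), a
+// SelectionDAG output pattern that operates on all elements can pass VLMax as
+// the AVL operand of the pseudo (6 here is the log2 SEW, i.e. SEW=64):
+//   (PseudoVADD_VV_M1 VR:$rs1, VR:$rs2, VLMax, 6)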
// List of EEW.
defvar EEWList = [8, 16, 32, 64];
; CHECK: $x1 = LD $x2, 2024 :: (load (s64) from %stack.3)
; CHECK: $x2 = frame-destroy ADDI $x2, 2032
; CHECK: PseudoRET
- %1:gpr = COPY $x11
+ %1:gprnox0 = COPY $x11
%0:gpr = COPY $x10
%2:vr = PseudoVLE64_V_M1 %0, %1, 6 :: (load unknown-size from %ir.pa, align 8)
%3:gpr = ADDI %stack.2, 0
; CHECK: [[COPY:%[0-9]+]]:vr = COPY $v0
; CHECK: [[COPY1:%[0-9]+]]:vrnov0 = COPY $v1
; CHECK: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v2
- ; CHECK: [[PseudoVNMSUB_VV_M1_:%[0-9]+]]:vr = PseudoVNMSUB_VV_M1 [[PseudoVNMSUB_VV_M1_]], [[COPY1]], [[COPY2]], $x0, 6, 1, implicit $vl, implicit $vtype
+ ; CHECK: [[PseudoVNMSUB_VV_M1_:%[0-9]+]]:vr = PseudoVNMSUB_VV_M1 [[PseudoVNMSUB_VV_M1_]], [[COPY1]], [[COPY2]], -1, 6, 1, implicit $vl, implicit $vtype
; CHECK: [[COPY2:%[0-9]+]]:vr = COPY [[PseudoVNMSUB_VV_M1_]]
; CHECK: dead [[COPY2]]:vr = PseudoVSLL_VI_M1 [[COPY2]], 11, $noreg, 6, implicit $vl, implicit $vtype
; CHECK: $v0 = COPY [[PseudoVNMSUB_VV_M1_]]
%0:vr = COPY $v0
%1:vrnov0 = COPY $v1
%2:vrnov0 = COPY $v2
- %0:vr = PseudoVNMSUB_VV_M1 %0, %1, killed %2, $x0, 6, 1, implicit $vl, implicit $vtype
+ %0:vr = PseudoVNMSUB_VV_M1 %0, %1, killed %2, -1, 6, 1, implicit $vl, implicit $vtype
%3:vr = COPY %0
%3:vr = PseudoVSLL_VI_M1 %3, 11, $noreg, 6, implicit $vl, implicit $vtype
$v0 = COPY %0
; CHECK: $v0 = COPY [[COPY]]
; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
; CHECK: [[COPY2:%[0-9]+]]:vrm8nov0 = COPY [[DEF]]
- ; CHECK: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, $x0, 6 :: (load (s512) from %ir.a, align 8)
+ ; CHECK: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, -1, 6 :: (load (s512) from %ir.a, align 8)
; CHECK: $v8m8 = COPY [[PseudoVLE64_V_M8_MASK]]
; CHECK: PseudoRET implicit $v8m8
%1:vr = COPY $v0
$v0 = COPY %1
%3:vrm8 = IMPLICIT_DEF
%4:vrm8nov0 = COPY %3
- %2:vrm8nov0 = PseudoVLE64_V_M8_MASK %4, %0, $v0, $x0, 6 :: (load (s512) from %ir.a, align 8)
+ %2:vrm8nov0 = PseudoVLE64_V_M8_MASK %4, %0, $v0, -1, 6 :: (load (s512) from %ir.a, align 8)
$v8m8 = COPY %2
PseudoRET implicit $v8m8
- { id: 4, class: gpr }
- { id: 5, class: gpr }
- { id: 6, class: vr }
- - { id: 7, class: gpr }
+ - { id: 7, class: gprnox0 }
- { id: 8, class: gpr }
liveins:
- { reg: '$x10', virtual-reg: '%4' }
successors: %bb.2(0x30000000), %bb.1(0x50000000)
liveins: $x10, $x11, $v8, $x12
- %7:gpr = COPY $x12
+ %7:gprnox0 = COPY $x12
%6:vr = COPY $v8
%5:gpr = COPY $x11
%4:gpr = COPY $x10
- { id: 4, class: gpr }
- { id: 5, class: gpr }
- { id: 6, class: gpr }
- - { id: 7, class: gpr }
+ - { id: 7, class: gprnox0 }
- { id: 8, class: gpr }
liveins:
- { reg: '$x10', virtual-reg: '%4' }
successors: %bb.2(0x30000000), %bb.1(0x50000000)
liveins: $x10, $x11, $x12, $x13
- %7:gpr = COPY $x13
+ %7:gprnox0 = COPY $x13
%6:gpr = COPY $x12
%5:gpr = COPY $x11
%4:gpr = COPY $x10
- { id: 3, class: gpr }
- { id: 4, class: vr }
- { id: 5, class: vr }
- - { id: 6, class: gpr }
+ - { id: 6, class: gprnox0 }
- { id: 7, class: gpr }
- { id: 8, class: gpr }
liveins:
successors: %bb.2(0x30000000), %bb.1(0x50000000)
liveins: $x10, $v8, $v9, $x11
- %6:gpr = COPY $x11
+ %6:gprnox0 = COPY $x11
%5:vr = COPY $v9
%4:vr = COPY $v8
%3:gpr = COPY $x10
alignment: 4
tracksRegLiveness: true
registers:
- - { id: 0, class: gpr }
+ - { id: 0, class: gprnox0 }
- { id: 1, class: vr }
- { id: 2, class: vr }
- { id: 3, class: vr }
; CHECK: [[COPY1:%[0-9]+]]:vr = COPY $v9
; CHECK: [[COPY2:%[0-9]+]]:vr = COPY $v8
; CHECK: [[COPY3:%[0-9]+]]:gpr = COPY $x10
- ; CHECK: [[PseudoVSETVLI:%[0-9]+]]:gpr = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
+ ; CHECK: [[PseudoVSETVLI:%[0-9]+]]:gprnox0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
; CHECK: [[COPY4:%[0-9]+]]:gpr = COPY $x0
; CHECK: BEQ [[COPY3]], [[COPY4]], %bb.2
; CHECK: PseudoBR %bb.1
%6:vr = COPY $v9
%5:vr = COPY $v8
%4:gpr = COPY $x10
- %0:gpr = PseudoVSETVLI %7, 88, implicit-def dead $vl, implicit-def dead $vtype
+ %0:gprnox0 = PseudoVSETVLI %7, 88, implicit-def dead $vl, implicit-def dead $vtype
%8:gpr = COPY $x0
BEQ %4, %8, %bb.2
PseudoBR %bb.1
registers:
- { id: 0, class: vr }
- { id: 1, class: vr }
- - { id: 2, class: gpr }
+ - { id: 2, class: gprnox0 }
- { id: 3, class: vr }
liveins:
- { reg: '$v8', virtual-reg: '%0' }
; CHECK: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
; CHECK: $v8 = COPY [[PseudoVADD_VV_M1_]]
; CHECK: PseudoRET implicit $v8
- %2:gpr = COPY $x10
+ %2:gprnox0 = COPY $x10
%1:vr = COPY $v9
%0:vr = COPY $v8
%3:vr = PseudoVADD_VV_M1 %0, %1, %2, 6
registers:
- { id: 0, class: gpr }
- { id: 1, class: vr }
- - { id: 2, class: gpr }
+ - { id: 2, class: gprnox0 }
- { id: 3, class: vr }
- { id: 4, class: vr }
liveins:
; CHECK: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
; CHECK: $v8 = COPY [[PseudoVADD_VV_M1_]]
; CHECK: PseudoRET implicit $v8
- %2:gpr = COPY $x11
+ %2:gprnox0 = COPY $x11
%1:vr = COPY $v8
%0:gpr = COPY $x10
%3:vr = PseudoVLE64_V_M1 %0, %2, 6
tracksRegLiveness: true
registers:
- { id: 0, class: gpr }
- - { id: 1, class: gpr }
+ - { id: 1, class: gprnox0 }
- { id: 2, class: vr }
- { id: 3, class: vr }
liveins:
; CHECK: early-clobber %3:vr = PseudoVZEXT_VF2_M1 killed [[PseudoVLE32_V_MF2_]], $noreg, 6, implicit $vl, implicit $vtype
; CHECK: $v8 = COPY %3
; CHECK: PseudoRET implicit $v8
- %1:gpr = COPY $x11
+ %1:gprnox0 = COPY $x11
%0:gpr = COPY $x10
%2:vr = PseudoVLE32_V_MF2 %0, %1, 5
early-clobber %3:vr = PseudoVZEXT_VF2_M1 killed %2, %1, 6
; CHECK: dead $x0 = PseudoVSETIVLI 2, 88, implicit-def $vl, implicit-def $vtype
; CHECK: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY]], 2, 6, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
; CHECK: dead %6:gpr = PseudoVSETVLIX0 $x0, 88, implicit-def $vl, implicit-def $vtype
- ; CHECK: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 0, $noreg, 6, implicit $vl, implicit $vtype
+ ; CHECK: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 0, -1, 6, implicit $vl, implicit $vtype
; CHECK: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; CHECK: dead $x0 = PseudoVSETIVLI 2, 88, implicit-def $vl, implicit-def $vtype
; CHECK: [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF]], killed [[PseudoVLE64_V_M1_]], killed [[PseudoVMV_V_I_M1_]], 2, 6, implicit $vl, implicit $vtype
; CHECK: PseudoRET implicit $x10
%0:gpr = COPY $x10
%1:vr = PseudoVLE64_V_M1 %0, 2, 6 :: (load (s128) from %ir.x)
- %2:vr = PseudoVMV_V_I_M1 0, $x0, 6
+ %2:vr = PseudoVMV_V_I_M1 0, -1, 6
%4:vr = IMPLICIT_DEF
%3:vr = PseudoVREDSUM_VS_M1 %4, killed %1, killed %2, 2, 6
%5:gpr = PseudoVMV_X_S_M1 killed %3, 6
- { id: 0, class: vr }
- { id: 1, class: vr }
- { id: 2, class: gprnox0 }
- - { id: 3, class: gpr }
+ - { id: 3, class: gprnox0 }
- { id: 4, class: vr }
liveins:
- { reg: '$v8', virtual-reg: '%0' }
; CHECK: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
; CHECK: [[COPY1:%[0-9]+]]:vr = COPY $v9
; CHECK: [[COPY2:%[0-9]+]]:vr = COPY $v8
- ; CHECK: [[PseudoVSETVLI:%[0-9]+]]:gpr = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
+ ; CHECK: [[PseudoVSETVLI:%[0-9]+]]:gprnox0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
; CHECK: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
; CHECK: $v8 = COPY [[PseudoVADD_VV_M1_]]
; CHECK: PseudoRET implicit $v8
%2:gprnox0 = COPY $x10
%1:vr = COPY $v9
%0:vr = COPY $v8
- %3:gpr = PseudoVSETVLI %2, 88, implicit-def dead $vl, implicit-def dead $vtype
+ %3:gprnox0 = PseudoVSETVLI %2, 88, implicit-def dead $vl, implicit-def dead $vtype
%4:vr = PseudoVADD_VV_M1 %0, %1, killed %3, 6
$v8 = COPY %4
PseudoRET implicit $v8
registers:
- { id: 0, class: gpr }
- { id: 1, class: vr }
- - { id: 2, class: gpr }
+ - { id: 2, class: gprnox0 }
- { id: 3, class: vr }
- { id: 4, class: vr }
liveins:
; CHECK: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
; CHECK: $v8 = COPY [[PseudoVADD_VV_M1_]]
; CHECK: PseudoRET implicit $v8
- %2:gpr = COPY $x11
+ %2:gprnox0 = COPY $x11
%1:vr = COPY $v8
%0:gpr = COPY $x10
%3:vr = PseudoVLE64_V_M1 %0, %2, 6
; CHECK: $x2 = frame-destroy ADDI $x2, 16
; CHECK: PseudoRET
%0:gpr = COPY $x10
- %1:gpr = COPY $x11
+ %1:gprnox0 = COPY $x11
$v0_v1_v2_v3_v4_v5_v6 = PseudoVLSEG7E64_V_M1 %0, %1, 6
PseudoVSPILL7_M1 killed renamable $v0_v1_v2_v3_v4_v5_v6, %stack.0, $x0
renamable $v7_v8_v9_v10_v11_v12_v13 = PseudoVRELOAD7_M1 %stack.0, $x0