case Hexagon::LDriw_mod:
Changed |= expandLoadInt(B, I, MRI, HII, NewRegs);
break;
- case Hexagon::STriq_pred_V6:
- case Hexagon::STriq_pred_V6_128B:
+ case Hexagon::PS_vstorerq_ai:
+ case Hexagon::PS_vstorerq_ai_128B:
Changed |= expandStoreVecPred(B, I, MRI, HII, NewRegs);
break;
- case Hexagon::LDriq_pred_V6:
- case Hexagon::LDriq_pred_V6_128B:
+ case Hexagon::PS_vloadrq_ai:
+ case Hexagon::PS_vloadrq_ai_128B:
Changed |= expandLoadVecPred(B, I, MRI, HII, NewRegs);
break;
case Hexagon::PS_vloadrw_ai:
case Hexagon::V6_vL32Ub_ai_128B:
case Hexagon::LDriw_pred:
case Hexagon::LDriw_mod:
- case Hexagon::LDriq_pred_V6:
- case Hexagon::LDriq_pred_vec_V6:
+ case Hexagon::PS_vloadrq_ai:
case Hexagon::PS_vloadrw_ai:
- case Hexagon::LDriq_pred_V6_128B:
- case Hexagon::LDriq_pred_vec_V6_128B:
+ case Hexagon::PS_vloadrq_ai_128B:
case Hexagon::PS_vloadrw_ai_128B: {
const MachineOperand OpFI = MI.getOperand(1);
if (!OpFI.isFI())
case Hexagon::V6_vS32Ub_ai_128B:
case Hexagon::STriw_pred:
case Hexagon::STriw_mod:
- case Hexagon::STriq_pred_V6:
- case Hexagon::STriq_pred_vec_V6:
+ case Hexagon::PS_vstorerq_ai:
case Hexagon::PS_vstorerw_ai:
- case Hexagon::STriq_pred_V6_128B:
- case Hexagon::STriq_pred_vec_V6_128B:
+ case Hexagon::PS_vstorerq_ai_128B:
case Hexagon::PS_vstorerw_ai_128B: {
const MachineOperand &OpFI = MI.getOperand(0);
if (!OpFI.isFI())
.addFrameIndex(FI).addImm(0)
.addReg(SrcReg, KillFlag).addMemOperand(MMO);
} else if (Hexagon::VecPredRegs128BRegClass.hasSubClassEq(RC)) {
- BuildMI(MBB, I, DL, get(Hexagon::STriq_pred_V6_128B))
+ BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerq_ai_128B))
.addFrameIndex(FI).addImm(0)
.addReg(SrcReg, KillFlag).addMemOperand(MMO);
} else if (Hexagon::VecPredRegsRegClass.hasSubClassEq(RC)) {
- BuildMI(MBB, I, DL, get(Hexagon::STriq_pred_V6))
+ BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerq_ai))
.addFrameIndex(FI).addImm(0)
.addReg(SrcReg, KillFlag).addMemOperand(MMO);
} else if (Hexagon::VectorRegs128BRegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(Hexagon::LDriw_mod), DestReg)
.addFrameIndex(FI).addImm(0).addMemOperand(MMO);
} else if (Hexagon::VecPredRegs128BRegClass.hasSubClassEq(RC)) {
- BuildMI(MBB, I, DL, get(Hexagon::LDriq_pred_V6_128B), DestReg)
+ BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrq_ai_128B), DestReg)
.addFrameIndex(FI).addImm(0).addMemOperand(MMO);
} else if (Hexagon::VecPredRegsRegClass.hasSubClassEq(RC)) {
- BuildMI(MBB, I, DL, get(Hexagon::LDriq_pred_V6), DestReg)
+ BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrq_ai), DestReg)
.addFrameIndex(FI).addImm(0).addMemOperand(MMO);
} else if (Hexagon::VecDblRegs128BRegClass.hasSubClassEq(RC)) {
unsigned Opc = Align < 128 ? Hexagon::PS_vloadrwu_ai_128B
// misaligns with respect to load size.
switch (Opcode) {
- case Hexagon::STriq_pred_V6:
- case Hexagon::STriq_pred_vec_V6:
+ case Hexagon::PS_vstorerq_ai:
case Hexagon::PS_vstorerw_ai:
- case Hexagon::LDriq_pred_V6:
- case Hexagon::LDriq_pred_vec_V6:
+ case Hexagon::PS_vloadrq_ai:
case Hexagon::PS_vloadrw_ai:
case Hexagon::V6_vL32b_ai:
case Hexagon::V6_vS32b_ai:
return (Offset >= Hexagon_MEMV_OFFSET_MIN) &&
(Offset <= Hexagon_MEMV_OFFSET_MAX);
- case Hexagon::STriq_pred_V6_128B:
- case Hexagon::STriq_pred_vec_V6_128B:
+ case Hexagon::PS_vstorerq_ai_128B:
case Hexagon::PS_vstorerw_ai_128B:
- case Hexagon::LDriq_pred_V6_128B:
- case Hexagon::LDriq_pred_vec_V6_128B:
+ case Hexagon::PS_vloadrq_ai_128B:
case Hexagon::PS_vloadrw_ai_128B:
case Hexagon::V6_vL32b_ai_128B:
case Hexagon::V6_vS32b_ai_128B:
// Store vector predicate pseudo.
let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 13,
isCodeGenOnly = 1, isPseudo = 1, mayStore = 1, hasSideEffects = 0 in {
-def STriq_pred_V6 : STInst<(outs),
- (ins IntRegs:$base, s32Imm:$offset, VecPredRegs:$src1),
- ".error \"should not emit\" ",
- []>,
- Requires<[HasV60T,UseHVXSgl]>;
-
-def STriq_pred_vec_V6 : STInst<(outs),
- (ins IntRegs:$base, s32Imm:$offset, VectorRegs:$src1),
- ".error \"should not emit\" ",
- []>,
- Requires<[HasV60T,UseHVXSgl]>;
-
-def STriq_pred_V6_128B : STInst<(outs),
- (ins IntRegs:$base, s32Imm:$offset, VecPredRegs128B:$src1),
- ".error \"should not emit\" ",
- []>,
- Requires<[HasV60T,UseHVXDbl]>;
-
-def STriq_pred_vec_V6_128B : STInst<(outs),
- (ins IntRegs:$base, s32Imm:$offset, VectorRegs128B:$src1),
- ".error \"should not emit\" ",
- []>,
- Requires<[HasV60T,UseHVXDbl]>;
+ // Codegen-only spill pseudo (isPseudo/isCodeGenOnly); the ".error" asm
+ // string guarantees a hard failure if it ever survives to emission.
+ // Single-vector-mode (UseHVXSgl) store of a VecPredRegs register.
+ def PS_vstorerq_ai : STInst<(outs),
+ (ins IntRegs:$base, s32Imm:$offset, VecPredRegs:$src1),
+ ".error \"should not emit\"", []>,
+ Requires<[HasV60T,UseHVXSgl]>;
+ // 128B-mode (UseHVXDbl) variant operating on VecPredRegs128B.
+ def PS_vstorerq_ai_128B : STInst<(outs),
+ (ins IntRegs:$base, s32Imm:$offset, VecPredRegs128B:$src1),
+ ".error \"should not emit\"", []>,
+ Requires<[HasV60T,UseHVXDbl]>;
}
// Load vector predicate pseudo.
let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 13,
opExtentAlign = 2, isCodeGenOnly = 1, isPseudo = 1, hasSideEffects = 0 in {
-def LDriq_pred_V6 : LDInst<(outs VecPredRegs:$dst),
- (ins IntRegs:$base, s32Imm:$offset),
- ".error \"should not emit\" ",
- []>,
- Requires<[HasV60T,UseHVXSgl]>;
-def LDriq_pred_vec_V6 : LDInst<(outs VectorRegs:$dst),
- (ins IntRegs:$base, s32Imm:$offset),
- ".error \"should not emit\" ",
- []>,
- Requires<[HasV60T,UseHVXSgl]>;
-def LDriq_pred_V6_128B : LDInst<(outs VecPredRegs128B:$dst),
- (ins IntRegs:$base, s32Imm:$offset),
- ".error \"should not emit\" ",
- []>,
- Requires<[HasV60T,UseHVXDbl]>;
-def LDriq_pred_vec_V6_128B : LDInst<(outs VectorRegs128B:$dst),
- (ins IntRegs:$base, s32Imm:$offset),
- ".error \"should not emit\" ",
- []>,
- Requires<[HasV60T,UseHVXDbl]>;
+ // Codegen-only reload pseudo, counterpart of PS_vstorerq_ai; the
+ // ".error" asm string makes any un-expanded instance fail at emission.
+ // Single-vector-mode (UseHVXSgl) load into a VecPredRegs register.
+ def PS_vloadrq_ai : LDInst<(outs VecPredRegs:$dst),
+ (ins IntRegs:$base, s32Imm:$offset),
+ ".error \"should not emit\"", []>,
+ Requires<[HasV60T,UseHVXSgl]>;
+ // 128B-mode (UseHVXDbl) variant producing a VecPredRegs128B register.
+ def PS_vloadrq_ai_128B : LDInst<(outs VecPredRegs128B:$dst),
+ (ins IntRegs:$base, s32Imm:$offset),
+ ".error \"should not emit\"", []>,
+ Requires<[HasV60T,UseHVXDbl]>;
}
class VSELInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],