}
void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
- Inst.addOperand(MCOperand::createImm(
- Reg.Modifiers == -1 ? 0 : Reg.Modifiers));
+ Inst.addOperand(MCOperand::createImm(Reg.Modifiers));
addRegOperands(Inst, N);
}
}
bool isReg() const override {
- return Kind == Register && Reg.Modifiers == -1;
+ return Kind == Register && Reg.Modifiers == 0;
}
bool isRegWithInputMods() const {
- return Kind == Register && (Reg.IsForcedVOP3 || Reg.Modifiers != -1);
+ return Kind == Register;
+ }
+
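+  // The optional clamp and output-modifier (omod) operands are represented as
+  // typed immediates.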
+ bool isClamp() const {
+ return isImm() && Imm.Type == ImmTyClamp;
+ }
+
+ bool isOMod() const {
+ return isImm() && Imm.Type == ImmTyOMod;
+ }
+
+ bool isMod() const {
+ return isClamp() || isOMod();
}
void setModifiers(unsigned Mods) {
bool hasModifiers() const {
assert(isRegKind());
- return Reg.Modifiers != -1;
+ return Reg.Modifiers != 0;
}
unsigned getReg() const override {
}
bool isRegClass(unsigned RCID) const {
- return Reg.TRI->getRegClass(RCID).contains(getReg());
+ return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
}
bool isSCSrc32() const {
Op->Reg.RegNo = RegNo;
Op->Reg.TRI = TRI;
Op->Reg.STI = STI;
- Op->Reg.Modifiers = -1;
+ Op->Reg.Modifiers = 0;
Op->Reg.IsForcedVOP3 = ForceVOP3;
Op->StartLoc = S;
Op->EndLoc = E;
OperandMatchResultTy parseUNorm(OperandVector &Operands);
OperandMatchResultTy parseR128(OperandVector &Operands);
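+  // Operand-list converters for VOP3-encodable instructions: cvtId emits
+  // operands as written, cvtVOP3 emits the full VOP3 form (modifier, clamp and
+  // omod immediates), and the cvtVOP3_2_mod/_2_nomod/_only wrappers choose
+  // between them per instruction.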
+ void cvtId(MCInst &Inst, const OperandVector &Operands);
+ void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
+ void cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands);
+ void cvtVOP3_only(MCInst &Inst, const OperandVector &Operands);
void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);
};
// If we are parsing after we reach EndOfStatement then this means we
// are appending default values to the Operands list. This is only done
// by custom parser, so we shouldn't continue on to the generic parsing.
if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
getLexer().is(AsmToken::EndOfStatement))
return ResTy;
SMLoc S, E;
unsigned RegNo;
if (!ParseRegister(RegNo, S, E)) {
-
- bool HasModifiers = operandsHaveModifiers(Operands);
unsigned Modifiers = 0;
if (Negate)
Modifiers |= 0x2;
}
- if (Modifiers && !HasModifiers) {
- // We are adding a modifier to src1 or src2 and previous sources
- // don't have modifiers, so we need to go back and empty modifers
- // for each previous source.
- for (unsigned PrevRegIdx = Operands.size() - 1; PrevRegIdx > 1;
- --PrevRegIdx) {
-
- AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[PrevRegIdx]);
- RegOp.setModifiers(0);
- }
- }
-
-
Operands.push_back(AMDGPUOperand::CreateReg(
RegNo, S, E, getContext().getRegisterInfo(), &getSTI(),
isForcedVOP3()));
- if (HasModifiers || Modifiers) {
+ if (Modifiers) {
AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
RegOp.setModifiers(Modifiers);
-
}
- } else {
- Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
- S));
- Parser.Lex();
- }
- return MatchOperand_Success;
+ } else {
+ ResTy = parseVOP3OptionalOps(Operands);
+ if (ResTy == MatchOperand_NoMatch) {
+ Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
+ S));
+ Parser.Lex();
+ }
+ }
+ return MatchOperand_Success;
}
default:
return MatchOperand_NoMatch;
if (operandsHaveModifiers(Operands))
return true;
-  AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);
-  if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
-    return true;
+  if (Operands.size() >= 2) {
+    AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);
+    if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
+      return true;
+  }
if (Operands.size() >= 5)
return true;
return MatchOperand_NoMatch;
}
-void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
-
- unsigned i = 1;
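+// Emit operands without any modifier/clamp/omod immediates; registers and
+// immediates are added exactly as parsed.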
+void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
+ unsigned I = 1;
const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
if (Desc.getNumDefs() > 0) {
- ((AMDGPUOperand &)*Operands[i++]).addRegOperands(Inst, 1);
+ ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
}
+ for (unsigned E = Operands.size(); I != E; ++I)
+ ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
+}
- std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
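+
+// For instructions whose sources accept modifiers: use the full VOP3 form when
+// any modifier was parsed or the VOP3 encoding was forced, otherwise emit the
+// operands as written.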
+void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
+ if (operandsHaveModifiers(Operands) || isForcedVOP3()) {
+ cvtVOP3(Inst, Operands);
+ } else {
+ cvtId(Inst, Operands);
+ }
+}
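+
+// For instructions whose sources take no modifiers: fall back to the full VOP3
+// form only if modifiers were parsed anyway.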
+void AMDGPUAsmParser::cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands) {
if (operandsHaveModifiers(Operands)) {
- for (unsigned e = Operands.size(); i != e; ++i) {
- AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
+ cvtVOP3(Inst, Operands);
+ } else {
+ cvtId(Inst, Operands);
+ }
+}
- if (Op.isRegWithInputMods()) {
- ((AMDGPUOperand &)*Operands[i]).addRegWithInputModsOperands(Inst, 2);
- continue;
- }
- OptionalIdx[Op.getImmTy()] = i;
- }
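+
+// For instructions that only have a VOP3 encoding: always emit the full form.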
+void AMDGPUAsmParser::cvtVOP3_only(MCInst &Inst, const OperandVector &Operands) {
+ cvtVOP3(Inst, Operands);
+}
- unsigned ClampIdx = OptionalIdx[AMDGPUOperand::ImmTyClamp];
- unsigned OModIdx = OptionalIdx[AMDGPUOperand::ImmTyOMod];
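+
+// Emit the full VOP3 operand list: each source register is preceded by its
+// modifier immediate, followed by the clamp and omod immediates (0 when they
+// were not specified).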
+void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
+ unsigned I = 1;
+ const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
+ if (Desc.getNumDefs() > 0) {
+ ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
+ }
+
+ unsigned ClampIdx = 0, OModIdx = 0;
+ for (unsigned E = Operands.size(); I != E; ++I) {
+ AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
+ if (Op.isRegWithInputMods()) {
+ Op.addRegWithInputModsOperands(Inst, 2);
+ } else if (Op.isClamp()) {
+ ClampIdx = I;
+ } else if (Op.isOMod()) {
+ OModIdx = I;
+ } else if (Op.isImm()) {
+ Op.addImmOperands(Inst, 1);
+ } else {
+ assert(false);
+ }
+ }
- ((AMDGPUOperand &)*Operands[ClampIdx]).addImmOperands(Inst, 1);
- ((AMDGPUOperand &)*Operands[OModIdx]).addImmOperands(Inst, 1);
+ if (ClampIdx) {
+ AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[ClampIdx]);
+ Op.addImmOperands(Inst, 1);
+ } else {
+ Inst.addOperand(MCOperand::createImm(0));
+ }
+ if (OModIdx) {
+ AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[OModIdx]);
+ Op.addImmOperands(Inst, 1);
} else {
- for (unsigned e = Operands.size(); i != e; ++i)
- ((AMDGPUOperand &)*Operands[i]).addRegOrImmOperands(Inst, 1);
+ Inst.addOperand(MCOperand::createImm(0));
}
}
let Size = 4;
}
-class VOP3Common <dag outs, dag ins, string asm, list<dag> pattern> :
+class VOP3Common <dag outs, dag ins, string asm, list<dag> pattern, bit HasMods = 0, bit VOP3Only = 0> :
VOPAnyCommon <outs, ins, asm, pattern> {
// Using complex patterns gives VOP3 patterns a very high complexity rating,
let VOP3 = 1;
let VALU = 1;
- let AsmMatchConverter = "cvtVOP3";
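+  // VOP3-only instructions always use the full VOP3 converter; otherwise the
+  // converter depends on whether the sources accept modifiers.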
+ let AsmMatchConverter =
+ !if(!eq(VOP3Only,1),
+ "cvtVOP3_only",
+ !if(!eq(HasMods,1), "cvtVOP3_2_mod", "cvtVOP3_2_nomod"));
let isCodeGenOnly = 0;
int Size = 8;
let PredicateMethod = "isImm";
let ParserMethod = "parseVOP3OptionalOps";
let RenderMethod = "addImmOperands";
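+  // Allow this operand to be omitted from the assembly string.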
+ let IsOptional = 1;
}
def ClampMatchClass : AsmOperandClass {
let PredicateMethod = "isImm";
let ParserMethod = "parseVOP3OptionalOps";
let RenderMethod = "addImmOperands";
+ let IsOptional = 1;
}
class SMRDOffsetBaseMatchClass <string predicate> : AsmOperandClass {
// Returns 1 if the source arguments have modifiers, 0 if they do not.
// XXX - do f16 instructions?
class hasModifiers<ValueType SrcVT> {
- bit ret = !if(!eq(SrcVT.Value, f32.Value), 1,
- !if(!eq(SrcVT.Value, f64.Value), 1, 0));
+ bit ret =
+ !if(!eq(SrcVT.Value, f32.Value), 1,
+ !if(!eq(SrcVT.Value, f64.Value), 1,
+ 0));
}
// Returns the input arguments for VOP[12C] instructions for the given SrcVT.
bits<1> clamp = !if(HasOutputMods, ?, 0);
}
-class VOP3_Pseudo <dag outs, dag ins, list<dag> pattern, string opName> :
- VOP3Common <outs, ins, "", pattern>,
+class VOP3_Pseudo <dag outs, dag ins, list<dag> pattern, string opName,
+ bit HasMods = 0, bit VOP3Only = 0> :
+ VOP3Common <outs, ins, "", pattern, HasMods, VOP3Only>,
VOP <opName>,
SIMCInstr<opName#"_e64", SISubtarget.NONE>,
MnemonicAlias<opName#"_e64", opName> {
field bit src0;
}
-class VOP3_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName> :
- VOP3Common <outs, ins, asm, []>,
+class VOP3_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName,
+ bit HasMods = 0, bit VOP3Only = 0> :
+ VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
VOP3e <op>,
SIMCInstr<opName#"_e64", SISubtarget.SI> {
let AssemblerPredicates = [isSICI];
}
-class VOP3_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName> :
- VOP3Common <outs, ins, asm, []>,
+class VOP3_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName,
+ bit HasMods = 0, bit VOP3Only = 0> :
+ VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
VOP3e_vi <op>,
SIMCInstr <opName#"_e64", SISubtarget.VI> {
let AssemblerPredicates = [isVI];
}
-class VOP3b_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName> :
- VOP3Common <outs, ins, asm, []>,
+class VOP3b_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName,
+ bit HasMods = 0, bit VOP3Only = 0> :
+ VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
VOP3be <op>,
SIMCInstr<opName#"_e64", SISubtarget.SI> {
let AssemblerPredicates = [isSICI];
}
-class VOP3b_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName> :
- VOP3Common <outs, ins, asm, []>,
+class VOP3b_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName,
+ bit HasMods = 0, bit VOP3Only = 0> :
+ VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
VOP3be_vi <op>,
SIMCInstr <opName#"_e64", SISubtarget.VI> {
let AssemblerPredicates = [isVI];
}
multiclass VOP3_m <vop op, dag outs, dag ins, string asm, list<dag> pattern,
- string opName, int NumSrcArgs, bit HasMods = 1> {
+ string opName, int NumSrcArgs, bit HasMods = 1, bit VOP3Only = 0> {
def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
- def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+ def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods, VOP3Only>,
VOP3DisableFields<!if(!eq(NumSrcArgs, 1), 0, 1),
!if(!eq(NumSrcArgs, 2), 0, 1),
HasMods>;
- def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
+ def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName, HasMods, VOP3Only>,
VOP3DisableFields<!if(!eq(NumSrcArgs, 1), 0, 1),
!if(!eq(NumSrcArgs, 2), 0, 1),
HasMods>;
multiclass VOP3_1_m <vop op, dag outs, dag ins, string asm,
list<dag> pattern, string opName, bit HasMods = 1> {
- def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
+ def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>;
- def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+ def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<0, 0, HasMods>;
- def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
+ def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<0, 0, HasMods>;
}
multiclass VOP3SI_1_m <vop op, dag outs, dag ins, string asm,
list<dag> pattern, string opName, bit HasMods = 1> {
- def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
+ def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>;
- def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+ def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<0, 0, HasMods>;
// No VI instruction. This class is for SI only.
}
list<dag> pattern, string opName, string revOp,
bit HasMods = 1> {
- def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
+ def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>,
VOP2_REV<revOp#"_e64", !eq(revOp, opName)>;
- def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+ def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<1, 0, HasMods>;
- def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
+ def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<1, 0, HasMods>;
}
list<dag> pattern, string opName, string revOp,
bit HasMods = 1> {
- def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
+ def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>,
VOP2_REV<revOp#"_e64", !eq(revOp, opName)>;
- def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+ def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<1, 0, HasMods>;
// No VI instruction. This class is for SI only.
bit HasMods, bit defExec,
string revOp, list<SchedReadWrite> sched> {
- def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
+ def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>,
VOP2_REV<revOp#"_e64", !eq(revOp, opName)> {
let Defs = !if(defExec, [EXEC], []);
let SchedRW = sched;
}
- def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+ def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<1, 0, HasMods> {
let Defs = !if(defExec, [EXEC], []);
let SchedRW = sched;
}
- def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
+ def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<1, 0, HasMods> {
let Defs = !if(defExec, [EXEC], []);
let SchedRW = sched;
VOPCX <op, opName, VOPC_I1_I64_I64, COND_NULL, [Write64Bit], revOp>;
multiclass VOP3_Helper <vop3 op, string opName, dag outs, dag ins, string asm,
- list<dag> pat, int NumSrcArgs, bit HasMods> : VOP3_m <
- op, outs, ins, opName#" "#asm, pat, opName, NumSrcArgs, HasMods
+ list<dag> pat, int NumSrcArgs, bit HasMods,
+ bit VOP3Only = 0> : VOP3_m <
+ op, outs, ins, opName#" "#asm, pat, opName, NumSrcArgs, HasMods, VOP3Only
>;
multiclass VOPC_CLASS_F32 <vopc op, string opName> :
VOPCClassInst <op, opName, VOPC_I1_F64_I32, 1, [WriteDoubleAdd]>;
multiclass VOP3Inst <vop3 op, string opName, VOPProfile P,
- SDPatternOperator node = null_frag> : VOP3_Helper <
+ SDPatternOperator node = null_frag, bit VOP3Only = 0> :
+ VOP3_Helper <
op, opName, (outs P.DstRC.RegClass:$dst), P.Ins64, P.Asm64,
!if(!eq(P.NumSrcArgs, 3),
!if(P.HasModifiers,
(node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
i1:$clamp, i32:$omod))))],
[(set P.DstVT:$dst, (node P.Src0VT:$src0))]))),
- P.NumSrcArgs, P.HasModifiers
+ P.NumSrcArgs, P.HasModifiers, VOP3Only
>;
// Special case for v_div_fmas_{f32|f64}, since it seems to be the
let isCommutable = 1 in {
defm V_ADD_F64 : VOP3Inst <vop3<0x164, 0x280>, "v_add_f64",
- VOP_F64_F64_F64, fadd
+ VOP_F64_F64_F64, fadd, 1
>;
defm V_MUL_F64 : VOP3Inst <vop3<0x165, 0x281>, "v_mul_f64",
- VOP_F64_F64_F64, fmul
+ VOP_F64_F64_F64, fmul, 1
>;
defm V_MIN_F64 : VOP3Inst <vop3<0x166, 0x282>, "v_min_f64",
- VOP_F64_F64_F64, fminnum
+ VOP_F64_F64_F64, fminnum, 1
>;
defm V_MAX_F64 : VOP3Inst <vop3<0x167, 0x283>, "v_max_f64",
- VOP_F64_F64_F64, fmaxnum
+ VOP_F64_F64_F64, fmaxnum, 1
>;
} // End isCommutable = 1
defm V_LDEXP_F64 : VOP3Inst <vop3<0x168, 0x284>, "v_ldexp_f64",
- VOP_F64_F64_I32, AMDGPUldexp
+ VOP_F64_F64_I32, AMDGPUldexp, 1
>;
} // End let SchedRW = [WriteDoubleAdd]
v_mac_legacy_f32 v1, v3, s5
// SICI: v_mac_legacy_f32_e64 v1, v3, s5 ; encoding: [0x01,0x00,0x0c,0xd2,0x03,0x0b,0x00,0x00]
-// FIXME: The error message should be: error: instruction not supported on this GPU
-// NOVI: error: invalid operand for instruction
+// NOVI: error: instruction not supported on this GPU
v_mul_legacy_f32 v1, v3, s5
// SICI: v_mul_legacy_f32_e64 v1, v3, s5 ; encoding: [0x01,0x00,0x0e,0xd2,0x03,0x0b,0x00,0x00]
// SICI: v_mad_legacy_f32 v2, v4, v6, v8 ; encoding: [0x02,0x00,0x80,0xd2,0x04,0x0d,0x22,0x04]
// VI: v_mad_legacy_f32 v2, v4, v6, v8 ; encoding: [0x02,0x00,0xc0,0xd1,0x04,0x0d,0x22,0x04]
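+// v_add_f64 has no VOP1/VOP2 encoding, so the plain mnemonic and the _e64
+// form must both select the VOP3 encoding, with and without source modifiers.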
+v_add_f64 v[0:1], v[2:3], v[5:6]
+// SICI: v_add_f64 v[0:1], v[2:3], v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x00]
+// VI: v_add_f64 v[0:1], v[2:3], v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x00]
+
+v_add_f64_e64 v[0:1], v[2:3], v[5:6]
+// SICI: v_add_f64 v[0:1], v[2:3], v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x00]
+// VI: v_add_f64 v[0:1], v[2:3], v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x00]
+
+v_add_f64 v[0:1], -v[2:3], v[5:6]
+// SICI: v_add_f64 v[0:1], -v[2:3], v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x20]
+// VI: v_add_f64 v[0:1], -v[2:3], v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x20]
+
+v_add_f64_e64 v[0:1], -v[2:3], v[5:6]
+// SICI: v_add_f64 v[0:1], -v[2:3], v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x20]
+// VI: v_add_f64 v[0:1], -v[2:3], v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x20]
+
+v_add_f64 v[0:1], v[2:3], -v[5:6]
+// SICI: v_add_f64 v[0:1], v[2:3], -v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x40]
+// VI: v_add_f64 v[0:1], v[2:3], -v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x40]
+
+v_add_f64_e64 v[0:1], v[2:3], -v[5:6]
+// SICI: v_add_f64 v[0:1], v[2:3], -v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x40]
+// VI: v_add_f64 v[0:1], v[2:3], -v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x40]
+
+v_add_f64 v[0:1], |v[2:3]|, v[5:6]
+// SICI: v_add_f64 v[0:1], |v[2:3]|, v[5:6] ; encoding: [0x00,0x01,0xc8,0xd2,0x02,0x0b,0x02,0x00]
+// VI: v_add_f64 v[0:1], |v[2:3]|, v[5:6] ; encoding: [0x00,0x01,0x80,0xd2,0x02,0x0b,0x02,0x00]
+
+v_add_f64_e64 v[0:1], |v[2:3]|, v[5:6]
+// SICI: v_add_f64 v[0:1], |v[2:3]|, v[5:6] ; encoding: [0x00,0x01,0xc8,0xd2,0x02,0x0b,0x02,0x00]
+// VI: v_add_f64 v[0:1], |v[2:3]|, v[5:6] ; encoding: [0x00,0x01,0x80,0xd2,0x02,0x0b,0x02,0x00]
+
+v_add_f64 v[0:1], v[2:3], |v[5:6]|
+// SICI: v_add_f64 v[0:1], v[2:3], |v[5:6]| ; encoding: [0x00,0x02,0xc8,0xd2,0x02,0x0b,0x02,0x00]
+// VI: v_add_f64 v[0:1], v[2:3], |v[5:6]| ; encoding: [0x00,0x02,0x80,0xd2,0x02,0x0b,0x02,0x00]
+
+v_add_f64_e64 v[0:1], v[2:3], |v[5:6]|
+// SICI: v_add_f64 v[0:1], v[2:3], |v[5:6]| ; encoding: [0x00,0x02,0xc8,0xd2,0x02,0x0b,0x02,0x00]
+// VI: v_add_f64 v[0:1], v[2:3], |v[5:6]| ; encoding: [0x00,0x02,0x80,0xd2,0x02,0x0b,0x02,0x00]
+
+v_add_f64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4
+// SICI: v_add_f64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4 ; encoding: [0x00,0x0a,0xc8,0xd2,0x02,0x0b,0x02,0x30]
+// VI: v_add_f64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4 ; encoding: [0x00,0x82,0x80,0xd2,0x02,0x0b,0x02,0x30]
+
+v_add_f64_e64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4
+// SICI: v_add_f64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4 ; encoding: [0x00,0x0a,0xc8,0xd2,0x02,0x0b,0x02,0x30]
+// VI: v_add_f64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4 ; encoding: [0x00,0x82,0x80,0xd2,0x02,0x0b,0x02,0x30]