__asm add ebx, dword ptr gVar[271 - 82 + 81 + ebx]
// CHECK: add dword ptr ${{[0-9]}}[ebx + $$828], ebx
__asm add dword ptr [ebx + gVar + 828], ebx
- // CHECK: add ecx, dword ptr ${{[0-9]}}[ecx + ecx * $$4 + $$4590]
+ // CHECK: add ecx, dword ptr ${{{[0-9]}}:P}[ecx + ecx * $$4 + $$4590]
__asm add ecx, dword ptr gVar[4590 + ecx + ecx*4]
- // CHECK: add dword ptr ${{[0-9]}}[ecx + ecx * $$8 + $$73], ecx
+ // CHECK: add dword ptr ${{{[0-9]}}:P}[ecx + ecx * $$8 + $$73], ecx
__asm add dword ptr [gVar + ecx + 45 + 23 - 53 + 60 - 2 + ecx*8], ecx
- // CHECK: add ${{[0-9]}}[ecx + ebx + $$7], eax
+ // CHECK: add ${{{[0-9]}}:P}[ecx + ebx + $$7], eax
__asm add 1 + 1 + 2 + 3[gVar + ecx + ebx], eax
}
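These CHECK lines are from the MS inline-asm codegen test. A minimal sketch of
the source pattern they exercise (a plain global gVar and clang's -fasm-blocks
are assumed here; the test's real declarations may differ):

  int gVar[1000];                 // hypothetical declaration
  void f() {
    __asm add ecx, dword ptr gVar[4590 + ecx + ecx*4]
  }

Because ecx occupies both the base and the index slot of the memory operand,
no spare register is left to materialize gVar's address, so the rewriter must
print gVar through the :P operand modifier (${N:P}), which the updated CHECK
patterns verify.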
/// isMem - Is this a memory operand?
virtual bool isMem() const = 0;
+ /// isMemUseUpRegs - Does this memory operand use up registers? For example,
+ /// Intel MS inline asm may use ARR[baseReg + IndexReg + ...], which can use
+ /// up the registers in the [...] expression, so no extra register can be
+ /// used for ARR, e.g. to compute ARR's address into a register or to use
+ /// another base register in the PIC model.
+ virtual bool isMemUseUpRegs() const { return false; }
+
/// getStartLoc - Get the location of the first token of this operand.
virtual SMLoc getStartLoc() const = 0;
/// getEndLoc - Get the location of the last token of this operand.
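To make "use up regs" concrete, a hypothetical 32-bit PIC lowering sketch
(register choices are illustrative, not taken from this patch):

  // With only an index in play, say ARR[ecx*4], a spare base register can
  // hold the address loaded from the GOT (ebx = GOT base, edx chosen freely):
  //   mov edx, dword ptr [ebx + ARR@GOT]
  //   add eax, dword ptr [edx + ecx*4]
  // For ARR[edi + ecx*4] both addressing slots are already occupied, so no
  // register remains for ARR's address; isMemUseUpRegs() reports exactly
  // this situation.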
int64_t Val;
StringRef Label;
IntelExpr IntelExp;
+ bool IntelExpRestricted;
public:
- AsmRewrite(AsmRewriteKind kind, SMLoc loc, unsigned len = 0, int64_t val = 0)
- : Kind(kind), Loc(loc), Len(len), Done(false), Val(val) {}
+ AsmRewrite(AsmRewriteKind kind, SMLoc loc, unsigned len = 0, int64_t val = 0,
+            bool Restricted = false)
+     : Kind(kind), Loc(loc), Len(len), Done(false), Val(val),
+       IntelExpRestricted(Restricted) {}
AsmRewrite(AsmRewriteKind kind, SMLoc loc, unsigned len, StringRef label)
: AsmRewrite(kind, loc, len) { Label = label; }
AsmRewrite(SMLoc loc, unsigned len, IntelExpr exp)
}
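A small usage sketch of the widened constructor (Loc and Len stand in for an
arbitrary location and length); the defaulted parameter keeps every existing
call site source-compatible:

  AsmRewrite Plain(AOK_Input, Loc, Len);            // IntelExpRestricted == false
  AsmRewrite Marked(AOK_Input, Loc, Len, 0, true);  // later printed as ${N:P}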
bool isOutput = (i == 1) && Desc.mayStore();
+ bool Restricted = Operand.isMemUseUpRegs();
SMLoc Start = SMLoc::getFromPointer(SymName.data());
if (isOutput) {
++InputIdx;
OutputDecls.push_back(OpDecl);
OutputDeclsAddressOf.push_back(Operand.needAddressOf());
OutputConstraints.push_back(("=" + Constraint).str());
- AsmStrRewrites.emplace_back(AOK_Output, Start, SymName.size());
+ AsmStrRewrites.emplace_back(AOK_Output, Start, SymName.size(), 0,
+ Restricted);
} else {
InputDecls.push_back(OpDecl);
InputDeclsAddressOf.push_back(Operand.needAddressOf());
InputConstraints.push_back(Constraint.str());
if (Desc.OpInfo[i - 1].isBranchTarget())
- AsmStrRewrites.emplace_back(AOK_CallInput, Start, SymName.size());
+ AsmStrRewrites.emplace_back(AOK_CallInput, Start, SymName.size(), 0,
+ Restricted);
else
- AsmStrRewrites.emplace_back(AOK_Input, Start, SymName.size());
+ AsmStrRewrites.emplace_back(AOK_Input, Start, SymName.size(), 0,
+ Restricted);
}
}
OS << Ctx.getAsmInfo()->getPrivateLabelPrefix() << AR.Label;
break;
case AOK_Input:
- OS << '$' << InputIdx++;
+ if (AR.IntelExpRestricted)
+ OS << "${" << InputIdx++ << ":P}";
+ else
+ OS << '$' << InputIdx++;
break;
case AOK_CallInput:
OS << "${" << InputIdx++ << ":P}";
break;
case AOK_Output:
- OS << '$' << OutputIdx++;
+ if (AR.IntelExpRestricted)
+ OS << "${" << OutputIdx++ << ":P}";
+ else
+ OS << '$' << OutputIdx++;
break;
case AOK_SizeDirective:
switch (AR.Val) {
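Concretely, for the test's restricted input above (its operand index is
assumed to be 1 here), the rewritten template becomes

  add ecx, dword ptr ${1:P}[ecx + ecx * $$4 + $$4590]

instead of "$1[...]"; the :P modifier prints the bare symbol, the same
treatment AOK_CallInput already received.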
// registers in a memory expression, and though inaccessible via rip/eip.
if (IsGlobalLV && (BaseReg || IndexReg)) {
Operands.push_back(X86Operand::CreateMem(getPointerWidth(), Disp, Start,
- End, Size, Identifier, Decl));
+ End, Size, Identifier, Decl, 0,
+ BaseReg && IndexReg));
return false;
}
// Otherwise, we set the base register to a non-zero value
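The last argument passed to CreateMem above is a simple predicate; checked
against the test's operands:

  bool UseUpRegs = BaseReg && IndexReg;
  // gVar[ebx + 828]           -> base only, UseUpRegs == false, plain $N
  // gVar[4590 + ecx + ecx*4]  -> base and index, UseUpRegs == true, ${N:P}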
/// If the memory operand is unsized and there are multiple instruction
/// matches, prefer the one with this size.
unsigned FrontendSize;
+
+ /// This is used for inline asm which may specify a base register and an
+ /// index register for a MemOp, e.g. ARR[eax + ecx*4]; in that case no
+ /// extra register can be used for the MemOp.
+ bool UseUpRegs;
};
union {
return isAbsMem() && Mem.ModeSize == 16;
}
+ bool isMemUseUpRegs() const override { return Mem.UseUpRegs; }
+
bool isSrcIdx() const {
return !getMemIndexReg() && getMemScale() == 1 &&
(getMemBaseReg() == X86::RSI || getMemBaseReg() == X86::ESI ||
static std::unique_ptr<X86Operand>
CreateMem(unsigned ModeSize, const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc,
unsigned Size = 0, StringRef SymName = StringRef(),
- void *OpDecl = nullptr, unsigned FrontendSize = 0) {
+ void *OpDecl = nullptr, unsigned FrontendSize = 0,
+ bool UseUpRegs = false) {
auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
Res->Mem.SegReg = 0;
Res->Mem.Disp = Disp;
Res->Mem.Size = Size;
Res->Mem.ModeSize = ModeSize;
Res->Mem.FrontendSize = FrontendSize;
+ Res->Mem.UseUpRegs = UseUpRegs;
Res->SymName = SymName;
Res->OpDecl = OpDecl;
Res->AddressOf = false;
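A call sketch for the new defaulted parameter, mirroring the X86AsmParser call
earlier in this patch (Disp, Start, End, SymName, and Decl are placeholders):

  auto Op = X86Operand::CreateMem(/*ModeSize=*/32, Disp, Start, End,
                                  /*Size=*/32, SymName, Decl,
                                  /*FrontendSize=*/0, /*UseUpRegs=*/true);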
SMLoc EndLoc, unsigned Size = 0,
unsigned DefaultBaseReg = X86::NoRegister,
StringRef SymName = StringRef(), void *OpDecl = nullptr,
- unsigned FrontendSize = 0) {
+ unsigned FrontendSize = 0, bool UseUpRegs = false) {
// We should never just have a displacement, that should be parsed as an
// absolute memory operand.
assert((SegReg || BaseReg || IndexReg || DefaultBaseReg) &&
Res->Mem.Size = Size;
Res->Mem.ModeSize = ModeSize;
Res->Mem.FrontendSize = FrontendSize;
+ Res->Mem.UseUpRegs = UseUpRegs;
Res->SymName = SymName;
Res->OpDecl = OpDecl;
Res->AddressOf = false;