return Kind | (NumOps << 3);
}
+ /// Predicate helpers: each one tests the kind field of an inline-asm operand
+ /// flag word (the low bits decoded by getKind) against a single Kind_* value.
+ static bool isRegDefKind(unsigned Flag){ return getKind(Flag) == Kind_RegDef;}
+ static bool isImmKind(unsigned Flag) { return getKind(Flag) == Kind_Imm; }
+ static bool isMemKind(unsigned Flag) { return getKind(Flag) == Kind_Mem; }
+ static bool isRegDefEarlyClobberKind(unsigned Flag) {
+ return getKind(Flag) == Kind_RegDefEarlyClobber;
+ }
+ static bool isClobberKind(unsigned Flag) {
+ return getKind(Flag) == Kind_Clobber;
+ }
+
/// getFlagWordForRegClass - Augment an existing flag word returned by
/// getFlagWord with the required register class for the following register
/// operands.
static unsigned getFlagWordForRegClass(unsigned InputFlag, unsigned RC) {
// Store RC + 1, reserve the value 0 to mean 'no register class'.
++RC;
+ assert(!isImmKind(InputFlag) && "Immediates cannot have a register class");
+ assert(!isMemKind(InputFlag) && "Memory operand cannot have a register class");
assert(RC <= 0x7fff && "Too large register class ID");
assert((InputFlag & ~0xffff) == 0 && "High bits already contain data");
return InputFlag | (RC << 16);
/// Augment an existing flag word returned by getFlagWord with the constraint
/// code for a memory constraint.
static unsigned getFlagWordForMem(unsigned InputFlag, unsigned Constraint) {
+ assert(isMemKind(InputFlag) && "InputFlag is not a memory constraint!");
assert(Constraint <= 0x7fff && "Too large a memory constraint ID");
assert(Constraint <= Constraints_Max && "Unknown constraint ID");
assert((InputFlag & ~0xffff) == 0 && "High bits already contain data");
return Flags & 7;
}
- static bool isRegDefKind(unsigned Flag){ return getKind(Flag) == Kind_RegDef;}
- static bool isImmKind(unsigned Flag) { return getKind(Flag) == Kind_Imm; }
- static bool isMemKind(unsigned Flag) { return getKind(Flag) == Kind_Mem; }
- static bool isRegDefEarlyClobberKind(unsigned Flag) {
- return getKind(Flag) == Kind_RegDefEarlyClobber;
- }
- static bool isClobberKind(unsigned Flag) {
- return getKind(Flag) == Kind_Clobber;
- }
-
static unsigned getMemoryConstraintID(unsigned Flag) {
assert(isMemKind(Flag));
return (Flag >> Constraints_ShiftAmount) & 0x7fff;
unsigned Flag = getOperand(FlagIdx).getImm();
unsigned RCID;
- if (InlineAsm::hasRegClassConstraint(Flag, RCID))
+ if ((InlineAsm::getKind(Flag) == InlineAsm::Kind_RegUse ||
+ InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDef ||
+ InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDefEarlyClobber) &&
+ InlineAsm::hasRegClassConstraint(Flag, RCID))
return TRI->getRegClass(RCID);
// Assume that all registers in a memory operand are pointers.
}
unsigned RCID = 0;
- if (InlineAsm::hasRegClassConstraint(Flag, RCID)) {
+ if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
+ InlineAsm::hasRegClassConstraint(Flag, RCID)) {
if (TRI) {
OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
} else
OS << ":RC" << RCID;
}
+ if (InlineAsm::isMemKind(Flag)) {
+ unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
+ switch (MCID) {
+ case InlineAsm::Constraint_es: OS << ":es"; break;
+ case InlineAsm::Constraint_i: OS << ":i"; break;
+ case InlineAsm::Constraint_m: OS << ":m"; break;
+ case InlineAsm::Constraint_o: OS << ":o"; break;
+ case InlineAsm::Constraint_v: OS << ":v"; break;
+ case InlineAsm::Constraint_Q: OS << ":Q"; break;
+ case InlineAsm::Constraint_R: OS << ":R"; break;
+ case InlineAsm::Constraint_S: OS << ":S"; break;
+ case InlineAsm::Constraint_T: OS << ":T"; break;
+ case InlineAsm::Constraint_Um: OS << ":Um"; break;
+ case InlineAsm::Constraint_Un: OS << ":Un"; break;
+ case InlineAsm::Constraint_Uq: OS << ":Uq"; break;
+ case InlineAsm::Constraint_Us: OS << ":Us"; break;
+ case InlineAsm::Constraint_Ut: OS << ":Ut"; break;
+ case InlineAsm::Constraint_Uv: OS << ":Uv"; break;
+ case InlineAsm::Constraint_Uy: OS << ":Uy"; break;
+ case InlineAsm::Constraint_X: OS << ":X"; break;
+ case InlineAsm::Constraint_Z: OS << ":Z"; break;
+ case InlineAsm::Constraint_ZC: OS << ":ZC"; break;
+ case InlineAsm::Constraint_Zy: OS << ":Zy"; break;
+ default: OS << ":?"; break;
+ }
+ }
+
unsigned TiedTo = 0;
if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
OS << " tiedto:$" << TiedTo;
// Otherwise, this is a memory operand. Ask the target to select it.
std::vector<SDValue> SelOps;
- if (SelectInlineAsmMemoryOperand(InOps[i+1],
- InlineAsm::getMemoryConstraintID(Flags),
- SelOps))
+ unsigned ConstraintID = InlineAsm::getMemoryConstraintID(Flags);
+ if (SelectInlineAsmMemoryOperand(InOps[i+1], ConstraintID, SelOps))
report_fatal_error("Could not match memory address. Inline asm"
" failure!");
// Add this to the output node.
unsigned NewFlags =
InlineAsm::getFlagWord(InlineAsm::Kind_Mem, SelOps.size());
+ NewFlags = InlineAsm::getFlagWordForMem(NewFlags, ConstraintID);
Ops.push_back(CurDAG->getTargetConstant(NewFlags, DL, MVT::i32));
Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
i += 2;
return &Mips::GPR64RegClass;
}
-/// Get the size of the offset supported by the given load/store.
+/// Get the size of the offset supported by the given load/store/inline asm.
/// The result includes the effects of any scale factors applied to the
/// instruction immediate.
-static inline unsigned getLoadStoreOffsetSizeInBits(const unsigned Opcode) {
+static inline unsigned getLoadStoreOffsetSizeInBits(const unsigned Opcode,
+ MachineOperand MO) {
switch (Opcode) {
case Mips::LD_B:
case Mips::ST_B:
case Mips::LD_D:
case Mips::ST_D:
return 10 + 3 /* scale factor */;
+ case Mips::LL:
+ case Mips::LL64:
+ case Mips::LLD:
+ case Mips::LLE:
+ case Mips::SC:
+ case Mips::SC64:
+ case Mips::SCD:
+ case Mips::SCE:
+ return 16;
+ case Mips::LLE_MM:
+ case Mips::LLE_MMR6:
+ case Mips::LL_MM:
+ case Mips::SCE_MM:
+ case Mips::SCE_MMR6:
+ case Mips::SC_MM:
+ return 12;
+ case Mips::LL64_R6:
+ case Mips::LL_R6:
+ case Mips::LLD_R6:
+ case Mips::SC64_R6:
+ case Mips::SCD_R6:
+ case Mips::SC_R6:
+ return 9;
+ case Mips::INLINEASM: {
+ unsigned ConstraintID = InlineAsm::getMemoryConstraintID(MO.getImm());
+ switch (ConstraintID) {
+ case InlineAsm::Constraint_ZC: {
+ const MipsSubtarget &Subtarget = MO.getParent()
+ ->getParent()
+ ->getParent()
+ ->getSubtarget<MipsSubtarget>();
+ if (Subtarget.inMicroMipsMode())
+ return 12;
+
+ if (Subtarget.hasMips32r6())
+ return 9;
+
+ return 16;
+ }
+ default:
+ return 16;
+ }
+ }
default:
return 16;
}
// Make sure Offset fits within the field available.
// For MSA instructions, this is a 10-bit signed immediate (scaled by
// element size), otherwise it is a 16-bit signed immediate.
- unsigned OffsetBitSize = getLoadStoreOffsetSizeInBits(MI.getOpcode());
+ unsigned OffsetBitSize =
+ getLoadStoreOffsetSizeInBits(MI.getOpcode(), MI.getOperand(OpNo - 1));
unsigned OffsetAlign = getLoadStoreOffsetAlign(MI.getOpcode());
if (OffsetBitSize < 16 && isInt<16>(Offset) &&
--- /dev/null
+; RUN: llc -march=mips -mcpu=mips32r6 < %s | FileCheck %s --check-prefixes=ALL,R6
+; RUN: llc -march=mips64 -mcpu=mips64r6 -target-abi=n64 < %s | FileCheck %s --check-prefixes=ALL,R6
+; RUN: llc -march=mips -mcpu=mips32 < %s | FileCheck %s --check-prefixes=ALL,PRER6
+; RUN: llc -march=mips64 -mcpu=mips64 -target-abi=n64 < %s | FileCheck %s --check-prefixes=ALL,PRER6
+
+
+%struct.anon = type { [63 x i32], i32, i32 }
+
+define i32 @Atomic() {
+; ALL-LABEL: Atomic:
+entry:
+ %s = alloca %struct.anon, align 4
+ %0 = bitcast %struct.anon* %s to i8*
+ %count = getelementptr inbounds %struct.anon, %struct.anon* %s, i64 0, i32 1
+ store i32 0, i32* %count, align 4
+; R6: addiu $[[R0:[0-9a-z]+]], $sp, {{[0-9]+}}
+
+; ALL: #APP
+
+; R6: ll ${{[0-9a-z]+}}, 0($[[R0]])
+; R6: sc ${{[0-9a-z]+}}, 0($[[R0]])
+
+; PRER6: ll ${{[0-9a-z]+}}, {{[0-9]+}}(${{[0-9a-z]+}})
+; PRER6: sc ${{[0-9a-z]+}}, {{[0-9]+}}(${{[0-9a-z]+}})
+
+; ALL: #NO_APP
+
+ %1 = call { i32, i32 } asm sideeffect ".set push\0A.set noreorder\0A1:\0All $0, $2\0Aaddu $1, $0, $3\0Asc $1, $2\0Abeqz $1, 1b\0Aaddu $1, $0, $3\0A.set pop\0A", "=&r,=&r,=*^ZC,Ir,*^ZC,~{memory},~{$1}"(i32* %count, i32 10, i32* %count)
+ %asmresult1.i = extractvalue { i32, i32 } %1, 1
+ %cmp = icmp ne i32 %asmresult1.i, 10
+ %conv = zext i1 %cmp to i32
+ %call2 = call i32 @f(i32 signext %conv)
+ ret i32 %call2
+}
+
+declare i32 @f(i32 signext)